Btrfs: make sure we update latest_bdev
fs/btrfs/volumes.c (firefly-linux-kernel-4.4.55.git)
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/kthread.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

static void lock_chunks(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
        mutex_unlock(&root->fs_info->chunk_mutex);
}

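/*
 * Tear down a btrfs_fs_devices and every btrfs_device hanging off it.
 * Callers must guarantee the list is no longer in use; the WARN_ON
 * below catches lists that are still marked as opened.
 */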
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;
        WARN_ON(fs_devices->opened);
        while (!list_empty(&fs_devices->devices)) {
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
                kfree(device->name);
                kfree(device);
        }
        kfree(fs_devices);
}

int btrfs_cleanup_fs_uuids(void)
{
        struct btrfs_fs_devices *fs_devices;

        while (!list_empty(&fs_uuids)) {
                fs_devices = list_entry(fs_uuids.next,
                                        struct btrfs_fs_devices, list);
                list_del(&fs_devices->list);
                free_fs_devices(fs_devices);
        }
        return 0;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
                                                   u64 devid, u8 *uuid)
{
        struct btrfs_device *dev;

        list_for_each_entry(dev, head, dev_list) {
                if (dev->devid == devid &&
                    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
                        return dev;
                }
        }
        return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
        struct btrfs_fs_devices *fs_devices;

        list_for_each_entry(fs_devices, &fs_uuids, list) {
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
        return NULL;
}

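/*
 * Splice a chain of bios (head..tail) back onto the front of the
 * device's pending list so they are retried ahead of newer bios and
 * their original ordering is preserved.
 */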
static void requeue_list(struct btrfs_pending_bios *pending_bios,
                        struct bio *head, struct bio *tail)
{
        struct bio *old_head;

        old_head = pending_bios->head;
        pending_bios->head = head;
        if (pending_bios->tail)
                tail->bi_next = old_head;
        else
                pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
        struct bio *pending;
        struct backing_dev_info *bdi;
        struct btrfs_fs_info *fs_info;
        struct btrfs_pending_bios *pending_bios;
        struct bio *tail;
        struct bio *cur;
        int again = 0;
        unsigned long num_run;
        unsigned long batch_run = 0;
        unsigned long limit;
        unsigned long last_waited = 0;
        int force_reg = 0;
        int sync_pending = 0;
        struct blk_plug plug;

        /*
         * this function runs all the bios we've collected for
         * a particular device.  We don't want to wander off to
         * another device without first sending all of these down.
         * So, set up a plug here and finish it off before we return
         */
        blk_start_plug(&plug);

        bdi = blk_get_backing_dev_info(device->bdev);
        fs_info = device->dev_root->fs_info;
        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

loop:
        spin_lock(&device->io_lock);

loop_lock:
        num_run = 0;

        /* take all the bios off the list at once and process them
         * later on (without the lock held).  But, remember the
         * tail and other pointers so the bios can be properly reinserted
         * into the list if we hit congestion
         */
        if (!force_reg && device->pending_sync_bios.head) {
                pending_bios = &device->pending_sync_bios;
                force_reg = 1;
        } else {
                pending_bios = &device->pending_bios;
                force_reg = 0;
        }

        pending = pending_bios->head;
        tail = pending_bios->tail;
        WARN_ON(pending && !tail);

        /*
         * if pending was null this time around, no bios need processing
         * at all and we can stop.  Otherwise it'll loop back up again
         * and do an additional check so no bios are missed.
         *
         * device->running_pending is used to synchronize with the
         * schedule_bio code.
         */
        if (device->pending_sync_bios.head == NULL &&
            device->pending_bios.head == NULL) {
                again = 0;
                device->running_pending = 0;
        } else {
                again = 1;
                device->running_pending = 1;
        }

        pending_bios->head = NULL;
        pending_bios->tail = NULL;

        spin_unlock(&device->io_lock);

        while (pending) {

                rmb();
                /* we want to work on both lists, but do more bios on the
                 * sync list than the regular list
                 */
                if ((num_run > 32 &&
                    pending_bios != &device->pending_sync_bios &&
                    device->pending_sync_bios.head) ||
                   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
                    device->pending_bios.head)) {
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        goto loop_lock;
                }

                cur = pending;
                pending = pending->bi_next;
                cur->bi_next = NULL;
                atomic_dec(&fs_info->nr_async_bios);

                if (atomic_read(&fs_info->nr_async_bios) < limit &&
                    waitqueue_active(&fs_info->async_submit_wait))
                        wake_up(&fs_info->async_submit_wait);

                BUG_ON(atomic_read(&cur->bi_cnt) == 0);

                /*
                 * if we're doing the sync list, record that our
                 * plug has some sync requests on it
                 *
                 * If we're doing the regular list and there are
                 * sync requests sitting around, unplug before
                 * we add more
                 */
                if (pending_bios == &device->pending_sync_bios) {
                        sync_pending = 1;
                } else if (sync_pending) {
                        blk_finish_plug(&plug);
                        blk_start_plug(&plug);
                        sync_pending = 0;
                }

                btrfsic_submit_bio(cur->bi_rw, cur);
                num_run++;
                batch_run++;
                if (need_resched())
                        cond_resched();

                /*
                 * we made progress, there is more work to do and the bdi
                 * is now congested.  Back off and let other work structs
                 * run instead
                 */
                if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
                    fs_info->fs_devices->open_devices > 1) {
                        struct io_context *ioc;

                        ioc = current->io_context;

                        /*
                         * the main goal here is that we don't want to
                         * block if we're going to be able to submit
                         * more requests without blocking.
                         *
                         * This code does two great things: it pokes into
                         * the elevator code from a filesystem _and_
                         * it makes assumptions about how batching works.
                         */
                        if (ioc && ioc->nr_batch_requests > 0 &&
                            time_before(jiffies, ioc->last_waited + HZ/50UL) &&
                            (last_waited == 0 ||
                             ioc->last_waited == last_waited)) {
                                /*
                                 * we want to go through our batch of
                                 * requests and stop.  So, we copy out
                                 * the ioc->last_waited time and test
                                 * against it before looping
                                 */
                                last_waited = ioc->last_waited;
                                if (need_resched())
                                        cond_resched();
                                continue;
                        }
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        device->running_pending = 1;

                        spin_unlock(&device->io_lock);
                        btrfs_requeue_work(&device->work);
                        goto done;
                }
                /* unplug every 64 requests just for good measure */
                if (batch_run % 64 == 0) {
                        blk_finish_plug(&plug);
                        blk_start_plug(&plug);
                        sync_pending = 0;
                }
        }

        cond_resched();
        if (again)
                goto loop;

        spin_lock(&device->io_lock);
        if (device->pending_bios.head || device->pending_sync_bios.head)
                goto loop_lock;
        spin_unlock(&device->io_lock);

done:
        blk_finish_plug(&plug);
        return 0;
}

static void pending_bios_fn(struct btrfs_work *work)
{
        struct btrfs_device *device;

        device = container_of(work, struct btrfs_device, work);
        run_scheduled_bios(device);
}

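/*
 * Record the device at @path in the in-memory device list for the
 * filesystem whose fsid is in @disk_super, creating a new
 * btrfs_fs_devices entry the first time an fsid is seen.  Also tracks
 * the highest generation found so latest_devid/latest_trans stay
 * current.
 */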
static noinline int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
        u64 found_transid = btrfs_super_generation(disk_super);
        char *name;

        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
                fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices)
                        return -ENOMEM;
                INIT_LIST_HEAD(&fs_devices->devices);
                INIT_LIST_HEAD(&fs_devices->alloc_list);
                list_add(&fs_devices->list, &fs_uuids);
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
                mutex_init(&fs_devices->device_list_mutex);
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
                                       disk_super->dev_item.uuid);
        }
        if (!device) {
                if (fs_devices->opened)
                        return -EBUSY;

                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device) {
                        /* we can safely leave the fs_devices entry around */
                        return -ENOMEM;
                }
                device->devid = devid;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                spin_lock_init(&device->io_lock);
                device->name = kstrdup(path, GFP_NOFS);
                if (!device->name) {
                        kfree(device);
                        return -ENOMEM;
                }
                INIT_LIST_HEAD(&device->dev_alloc_list);

                /* init readahead state */
                spin_lock_init(&device->reada_lock);
                device->reada_curr_zone = NULL;
                atomic_set(&device->reada_in_flight, 0);
                device->reada_next = 0;
                INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
                INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

                mutex_lock(&fs_devices->device_list_mutex);
                list_add_rcu(&device->dev_list, &fs_devices->devices);
                mutex_unlock(&fs_devices->device_list_mutex);

                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        } else if (!device->name || strcmp(device->name, path)) {
                name = kstrdup(path, GFP_NOFS);
                if (!name)
                        return -ENOMEM;
                kfree(device->name);
                device->name = name;
                if (device->missing) {
                        fs_devices->missing_devices--;
                        device->missing = 0;
                }
        }

        if (found_transid > fs_devices->latest_trans) {
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
        }
        *fs_devices_ret = fs_devices;
        return 0;
}

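/*
 * Build a private copy of @orig's device list, duplicating each
 * btrfs_device and its name.  Used by the seeding code, which needs an
 * independent copy of the original list.
 */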
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
        struct btrfs_fs_devices *fs_devices;
        struct btrfs_device *device;
        struct btrfs_device *orig_dev;

        fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
        if (!fs_devices)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&fs_devices->devices);
        INIT_LIST_HEAD(&fs_devices->alloc_list);
        INIT_LIST_HEAD(&fs_devices->list);
        mutex_init(&fs_devices->device_list_mutex);
        fs_devices->latest_devid = orig->latest_devid;
        fs_devices->latest_trans = orig->latest_trans;
        memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

        /* We hold the volume lock; it is safe to get the devices. */
        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device)
                        goto error;

                device->name = kstrdup(orig_dev->name, GFP_NOFS);
                if (!device->name) {
                        kfree(device);
                        goto error;
                }

                device->devid = orig_dev->devid;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
                spin_lock_init(&device->io_lock);
                INIT_LIST_HEAD(&device->dev_list);
                INIT_LIST_HEAD(&device->dev_alloc_list);

                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
        return fs_devices;
error:
        free_fs_devices(fs_devices);
        return ERR_PTR(-ENOMEM);
}

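/*
 * Close and free every device that is not referenced by the FS
 * metadata, then point latest_bdev/latest_devid/latest_trans at the
 * in-metadata device with the highest generation so later superblock
 * reads and writes go through a device that is staying open.
 */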
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device, *next;
        struct block_device *latest_bdev = NULL;
        u64 latest_devid = 0;
        u64 latest_transid = 0;

        mutex_lock(&uuid_mutex);
again:
        /* This is the initialized path; it is safe to release the devices. */
        list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
                if (device->in_fs_metadata) {
                        if (!latest_transid ||
                            device->generation > latest_transid) {
                                latest_devid = device->devid;
                                latest_transid = device->generation;
                                latest_bdev = device->bdev;
                        }
                        continue;
                }

                if (device->bdev) {
                        blkdev_put(device->bdev, device->mode);
                        device->bdev = NULL;
                        fs_devices->open_devices--;
                }
                if (device->writeable) {
                        list_del_init(&device->dev_alloc_list);
                        device->writeable = 0;
                        fs_devices->rw_devices--;
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
                kfree(device->name);
                kfree(device);
        }

        if (fs_devices->seed) {
                fs_devices = fs_devices->seed;
                goto again;
        }

        fs_devices->latest_bdev = latest_bdev;
        fs_devices->latest_devid = latest_devid;
        fs_devices->latest_trans = latest_transid;

        mutex_unlock(&uuid_mutex);
        return 0;
}

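/*
 * Final device teardown runs from a work item so that blkdev_put(),
 * which can sleep, is never called from the RCU callback itself;
 * free_device() below is the call_rcu() callback that queues the work.
 */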
static void __free_device(struct work_struct *work)
{
        struct btrfs_device *device;

        device = container_of(work, struct btrfs_device, rcu_work);

        if (device->bdev)
                blkdev_put(device->bdev, device->mode);

        kfree(device->name);
        kfree(device);
}

static void free_device(struct rcu_head *head)
{
        struct btrfs_device *device;

        device = container_of(head, struct btrfs_device, rcu);

        INIT_WORK(&device->rcu_work, __free_device);
        schedule_work(&device->rcu_work);
}

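/*
 * Drop one open reference on @fs_devices.  When the last reference
 * goes away, close every device: each live btrfs_device is replaced
 * with a bdev-less copy under RCU and then freed via call_rcu() so
 * that lockless readers of the device list never see a stale pointer.
 */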
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;

        if (--fs_devices->opened > 0)
                return 0;

        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                struct btrfs_device *new_device;

                if (device->bdev)
                        fs_devices->open_devices--;

                if (device->writeable) {
                        list_del_init(&device->dev_alloc_list);
                        fs_devices->rw_devices--;
                }

                if (device->can_discard)
                        fs_devices->num_can_discard--;

                new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
                BUG_ON(!new_device);
                memcpy(new_device, device, sizeof(*new_device));
                new_device->name = kstrdup(device->name, GFP_NOFS);
                BUG_ON(device->name && !new_device->name);
                new_device->bdev = NULL;
                new_device->writeable = 0;
                new_device->in_fs_metadata = 0;
                new_device->can_discard = 0;
                list_replace_rcu(&device->dev_list, &new_device->dev_list);

                call_rcu(&device->rcu, free_device);
        }
        mutex_unlock(&fs_devices->device_list_mutex);

        WARN_ON(fs_devices->open_devices);
        WARN_ON(fs_devices->rw_devices);
        fs_devices->opened = 0;
        fs_devices->seeding = 0;

        return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_fs_devices *seed_devices = NULL;
        int ret;

        mutex_lock(&uuid_mutex);
        ret = __btrfs_close_devices(fs_devices);
        if (!fs_devices->opened) {
                seed_devices = fs_devices->seed;
                fs_devices->seed = NULL;
        }
        mutex_unlock(&uuid_mutex);

        while (seed_devices) {
                fs_devices = seed_devices;
                seed_devices = fs_devices->seed;
                __btrfs_close_devices(fs_devices);
                free_fs_devices(fs_devices);
        }
        return ret;
}

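/*
 * Open every named device in @fs_devices, validate its superblock
 * against the expected devid/uuid, and remember the device with the
 * highest generation as latest_bdev.  Devices that fail to open are
 * skipped; the open only fails if no device could be opened at all.
 */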
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                                fmode_t flags, void *holder)
{
        struct request_queue *q;
        struct block_device *bdev;
        struct list_head *head = &fs_devices->devices;
        struct btrfs_device *device;
        struct block_device *latest_bdev = NULL;
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;
        u64 latest_devid = 0;
        u64 latest_transid = 0;
        u64 devid;
        int seeding = 1;
        int ret = 0;

        flags |= FMODE_EXCL;

        list_for_each_entry(device, head, dev_list) {
                if (device->bdev)
                        continue;
                if (!device->name)
                        continue;

                bdev = blkdev_get_by_path(device->name, flags, holder);
                if (IS_ERR(bdev)) {
                        printk(KERN_INFO "open %s failed\n", device->name);
                        goto error;
                }
                set_blocksize(bdev, 4096);

                bh = btrfs_read_dev_super(bdev);
                if (!bh)
                        goto error_close;

                disk_super = (struct btrfs_super_block *)bh->b_data;
                devid = btrfs_stack_device_id(&disk_super->dev_item);
                if (devid != device->devid)
                        goto error_brelse;

                if (memcmp(device->uuid, disk_super->dev_item.uuid,
                           BTRFS_UUID_SIZE))
                        goto error_brelse;

                device->generation = btrfs_super_generation(disk_super);
                if (!latest_transid || device->generation > latest_transid) {
                        latest_devid = devid;
                        latest_transid = device->generation;
                        latest_bdev = bdev;
                }

                if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
                        device->writeable = 0;
                } else {
                        device->writeable = !bdev_read_only(bdev);
                        seeding = 0;
                }

                q = bdev_get_queue(bdev);
                if (blk_queue_discard(q)) {
                        device->can_discard = 1;
                        fs_devices->num_can_discard++;
                }

                device->bdev = bdev;
                device->in_fs_metadata = 0;
                device->mode = flags;

                if (!blk_queue_nonrot(bdev_get_queue(bdev)))
                        fs_devices->rotating = 1;

                fs_devices->open_devices++;
                if (device->writeable) {
                        fs_devices->rw_devices++;
                        list_add(&device->dev_alloc_list,
                                 &fs_devices->alloc_list);
                }
                brelse(bh);
                continue;

error_brelse:
                brelse(bh);
error_close:
                blkdev_put(bdev, flags);
error:
                continue;
        }
        if (fs_devices->open_devices == 0) {
                ret = -EINVAL;
                goto out;
        }
        fs_devices->seeding = seeding;
        fs_devices->opened = 1;
        fs_devices->latest_bdev = latest_bdev;
        fs_devices->latest_devid = latest_devid;
        fs_devices->latest_trans = latest_transid;
        fs_devices->total_rw_bytes = 0;
out:
        return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                       fmode_t flags, void *holder)
{
        int ret;

        mutex_lock(&uuid_mutex);
        if (fs_devices->opened) {
                fs_devices->opened++;
                ret = 0;
        } else {
                ret = __btrfs_open_devices(fs_devices, flags, holder);
        }
        mutex_unlock(&uuid_mutex);
        return ret;
}

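/*
 * Read the superblock from @path and, if it looks like a btrfs device,
 * print its label/fsid and register it in the global device list via
 * device_list_add().
 */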
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
                          struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_super_block *disk_super;
        struct block_device *bdev;
        struct buffer_head *bh;
        int ret;
        u64 devid;
        u64 transid;

        mutex_lock(&uuid_mutex);

        flags |= FMODE_EXCL;
        bdev = blkdev_get_by_path(path, flags, holder);

        if (IS_ERR(bdev)) {
                ret = PTR_ERR(bdev);
                goto error;
        }

        ret = set_blocksize(bdev, 4096);
        if (ret)
                goto error_close;
        bh = btrfs_read_dev_super(bdev);
        if (!bh) {
                ret = -EINVAL;
                goto error_close;
        }
        disk_super = (struct btrfs_super_block *)bh->b_data;
        devid = btrfs_stack_device_id(&disk_super->dev_item);
        transid = btrfs_super_generation(disk_super);
        if (disk_super->label[0])
                printk(KERN_INFO "device label %s ", disk_super->label);
        else
                printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
        printk(KERN_CONT "devid %llu transid %llu %s\n",
               (unsigned long long)devid, (unsigned long long)transid, path);
        ret = device_list_add(path, disk_super, devid, fs_devices_ret);

        brelse(bh);
error_close:
        blkdev_put(bdev, flags);
error:
        mutex_unlock(&uuid_mutex);
        return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
                                   u64 end, u64 *length)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent;
        struct btrfs_path *path;
        u64 extent_end;
        int ret;
        int slot;
        struct extent_buffer *l;

        *length = 0;

        if (start >= device->total_bytes)
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->reada = 2;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid, key.type);
                if (ret < 0)
                        goto out;
        }

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;

                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        break;

                if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (key.offset <= start && extent_end > end) {
                        *length = end - start + 1;
                        break;
                } else if (key.offset <= start && extent_end > start)
                        *length += extent_end - start;
                else if (key.offset > start && extent_end <= end)
                        *length += extent_end - key.offset;
                else if (key.offset > start && key.offset <= end) {
                        *length += end - key.offset + 1;
                        break;
                } else if (key.offset > end)
                        break;

next:
                path->slots[0]++;
        }
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

/*
 * find_free_dev_extent - find free space in the specified device
 * @device:     the device which we search the free space in
 * @num_bytes:  the size of the free space that we need
 * @start:      store the start of the free space.
 * @len:        the size of the free space that we find, or the size of the
 *              max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *len)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent;
        struct btrfs_path *path;
        u64 hole_size;
        u64 max_hole_start;
        u64 max_hole_size;
        u64 extent_end;
        u64 search_start;
        u64 search_end = device->total_bytes;
        int ret;
        int slot;
        struct extent_buffer *l;

        /* FIXME use last free of some kind */

        /* we don't want to overwrite the superblock on the drive,
         * so we make sure to start at an offset of at least 1MB
         */
        search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

        max_hole_start = search_start;
        max_hole_size = 0;
        hole_size = 0;

        if (search_start >= search_end) {
                ret = -ENOSPC;
                goto error;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto error;
        }
        path->reada = 2;

        key.objectid = device->devid;
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid, key.type);
                if (ret < 0)
                        goto out;
        }

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;

                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        break;

                if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                if (key.offset > search_start) {
                        hole_size = key.offset - search_start;

                        if (hole_size > max_hole_size) {
                                max_hole_start = search_start;
                                max_hole_size = hole_size;
                        }

                        /*
                         * If this free space is greater than what we need,
                         * it must be the max free space that we have found
                         * until now, so max_hole_start must point to the start
                         * of this free space and the length of this free space
                         * is stored in max_hole_size. Thus, we return
                         * max_hole_start and max_hole_size and go back to the
                         * caller.
                         */
                        if (hole_size >= num_bytes) {
                                ret = 0;
                                goto out;
                        }
                }

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (extent_end > search_start)
                        search_start = extent_end;
next:
                path->slots[0]++;
                cond_resched();
        }

        /*
         * At this point, search_start should be the end of
         * allocated dev extents, and when shrinking the device,
         * search_end may be smaller than search_start.
         */
        if (search_end > search_start)
                hole_size = search_end - search_start;

        if (hole_size > max_hole_size) {
                max_hole_start = search_start;
                max_hole_size = hole_size;
        }

        /* See above. */
        if (hole_size < num_bytes)
                ret = -ENOSPC;
        else
                ret = 0;

out:
        btrfs_free_path(path);
error:
        *start = max_hole_start;
        if (len)
                *len = max_hole_size;
        return ret;
}

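/*
 * Remove the dev extent item that covers @start on @device and give
 * the space back to the free_chunk accounting.
 */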
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
                          struct btrfs_device *device,
                          u64 start)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf = NULL;
        struct btrfs_dev_extent *extent = NULL;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;
again:
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid,
                                          BTRFS_DEV_EXTENT_KEY);
                if (ret)
                        goto out;
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
                BUG_ON(found_key.offset > start || found_key.offset +
                       btrfs_dev_extent_length(leaf, extent) < start);
                key = found_key;
                btrfs_release_path(path);
                goto again;
        } else if (ret == 0) {
                leaf = path->nodes[0];
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
        }
        BUG_ON(ret);

        if (device->bytes_used > 0) {
                u64 len = btrfs_dev_extent_length(leaf, extent);
                device->bytes_used -= len;
                spin_lock(&root->fs_info->free_chunk_lock);
                root->fs_info->free_chunk_space += len;
                spin_unlock(&root->fs_info->free_chunk_lock);
        }
        ret = btrfs_del_item(trans, root, path);

out:
        btrfs_free_path(path);
        return ret;
}

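/*
 * Insert a dev extent item mapping [@start, @start + @num_bytes) on
 * @device to the chunk at @chunk_offset in @chunk_tree.
 */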
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
                           struct btrfs_device *device,
                           u64 chunk_tree, u64 chunk_objectid,
                           u64 chunk_offset, u64 start, u64 num_bytes)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *extent;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        WARN_ON(!device->in_fs_metadata);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*extent));
        BUG_ON(ret);

        leaf = path->nodes[0];
        extent = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_dev_extent);
        btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
        btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
        btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

        write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
                    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
                    BTRFS_UUID_SIZE);

        btrfs_set_dev_extent_length(leaf, extent, num_bytes);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);
        return ret;
}

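/*
 * Set *offset to the end of the last chunk item for @objectid so the
 * caller can place a new chunk there; *offset is 0 when no chunk
 * exists yet.
 */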
static noinline int find_next_chunk(struct btrfs_root *root,
                                    u64 objectid, u64 *offset)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct btrfs_chunk *chunk;
        struct btrfs_key found_key;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
        if (ret) {
                *offset = 0;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                if (found_key.objectid != objectid)
                        *offset = 0;
                else {
                        chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                               struct btrfs_chunk);
                        *offset = found_key.offset +
                                btrfs_chunk_length(path->nodes[0], chunk);
                }
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}

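/*
 * Look up the highest devid currently in the chunk tree and return
 * the next free one in *objectid (devids start at 1).
 */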
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_path *path;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = (u64)-1;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
                                  BTRFS_DEV_ITEM_KEY);
        if (ret) {
                *objectid = 1;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                *objectid = found_key.offset + 1;
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}

/*
 * the device information is stored in the chunk root;
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
                     struct btrfs_root *root,
                     struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        unsigned long ptr;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*dev_item));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_generation(leaf, dev_item, 0);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_set_device_group(leaf, dev_item, 0);
        btrfs_set_device_seek_speed(leaf, dev_item, 0);
        btrfs_set_device_bandwidth(leaf, dev_item, 0);
        btrfs_set_device_start_offset(leaf, dev_item, 0);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
        ptr = (unsigned long)btrfs_device_fsid(dev_item);
        write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
        btrfs_mark_buffer_dirty(leaf);

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

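/*
 * Delete the dev item for @device from the chunk tree.  Runs in its
 * own transaction, which is committed before returning.
 */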
static int btrfs_rm_dev_item(struct btrfs_root *root,
                             struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_trans_handle *trans;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;
        lock_chunks(root);

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, root, path);
        if (ret)
                goto out;
out:
        btrfs_free_path(path);
        unlock_chunks(root);
        btrfs_commit_transaction(trans, root);
        return ret;
}

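/*
 * Remove a device from the filesystem: locate it (or the first
 * missing one when @device_path is "missing"), refuse removals that
 * would drop below the RAID profile's minimum device count, migrate
 * its extents away with btrfs_shrink_device(), and finally drop it
 * from the metadata, the in-memory lists and the on-disk super.
 */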
1284 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1285 {
1286         struct btrfs_device *device;
1287         struct btrfs_device *next_device;
1288         struct block_device *bdev;
1289         struct buffer_head *bh = NULL;
1290         struct btrfs_super_block *disk_super;
1291         struct btrfs_fs_devices *cur_devices;
1292         u64 all_avail;
1293         u64 devid;
1294         u64 num_devices;
1295         u8 *dev_uuid;
1296         int ret = 0;
1297         bool clear_super = false;
1298
1299         mutex_lock(&uuid_mutex);
1300
1301         all_avail = root->fs_info->avail_data_alloc_bits |
1302                 root->fs_info->avail_system_alloc_bits |
1303                 root->fs_info->avail_metadata_alloc_bits;
1304
1305         if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
1306             root->fs_info->fs_devices->num_devices <= 4) {
1307                 printk(KERN_ERR "btrfs: unable to go below four devices "
1308                        "on raid10\n");
1309                 ret = -EINVAL;
1310                 goto out;
1311         }
1312
1313         if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
1314             root->fs_info->fs_devices->num_devices <= 2) {
1315                 printk(KERN_ERR "btrfs: unable to go below two "
1316                        "devices on raid1\n");
1317                 ret = -EINVAL;
1318                 goto out;
1319         }
1320
1321         if (strcmp(device_path, "missing") == 0) {
1322                 struct list_head *devices;
1323                 struct btrfs_device *tmp;
1324
1325                 device = NULL;
1326                 devices = &root->fs_info->fs_devices->devices;
1327                 /*
1328                  * It is safe to read the devices since the volume_mutex
1329                  * is held.
1330                  */
1331                 list_for_each_entry(tmp, devices, dev_list) {
1332                         if (tmp->in_fs_metadata && !tmp->bdev) {
1333                                 device = tmp;
1334                                 break;
1335                         }
1336                 }
1337                 bdev = NULL;
1338                 bh = NULL;
1339                 disk_super = NULL;
1340                 if (!device) {
1341                         printk(KERN_ERR "btrfs: no missing devices found to "
1342                                "remove\n");
1343                         goto out;
1344                 }
1345         } else {
1346                 bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
1347                                           root->fs_info->bdev_holder);
1348                 if (IS_ERR(bdev)) {
1349                         ret = PTR_ERR(bdev);
1350                         goto out;
1351                 }
1352
1353                 set_blocksize(bdev, 4096);
1354                 bh = btrfs_read_dev_super(bdev);
1355                 if (!bh) {
1356                         ret = -EINVAL;
1357                         goto error_close;
1358                 }
1359                 disk_super = (struct btrfs_super_block *)bh->b_data;
1360                 devid = btrfs_stack_device_id(&disk_super->dev_item);
1361                 dev_uuid = disk_super->dev_item.uuid;
1362                 device = btrfs_find_device(root, devid, dev_uuid,
1363                                            disk_super->fsid);
1364                 if (!device) {
1365                         ret = -ENOENT;
1366                         goto error_brelse;
1367                 }
1368         }
1369
1370         if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1371                 printk(KERN_ERR "btrfs: unable to remove the only writeable "
1372                        "device\n");
1373                 ret = -EINVAL;
1374                 goto error_brelse;
1375         }
1376
1377         if (device->writeable) {
1378                 lock_chunks(root);
1379                 list_del_init(&device->dev_alloc_list);
1380                 unlock_chunks(root);
1381                 root->fs_info->fs_devices->rw_devices--;
1382                 clear_super = true;
1383         }
1384
1385         ret = btrfs_shrink_device(device, 0);
1386         if (ret)
1387                 goto error_undo;
1388
1389         ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1390         if (ret)
1391                 goto error_undo;
1392
1393         spin_lock(&root->fs_info->free_chunk_lock);
1394         root->fs_info->free_chunk_space = device->total_bytes -
1395                 device->bytes_used;
1396         spin_unlock(&root->fs_info->free_chunk_lock);
1397
1398         device->in_fs_metadata = 0;
1399         btrfs_scrub_cancel_dev(root, device);
1400
1401         /*
1402          * the device list mutex makes sure that we don't change
1403          * the device list while someone else is writing out all
1404          * the device supers.
1405          */
1406
1407         cur_devices = device->fs_devices;
1408         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1409         list_del_rcu(&device->dev_list);
1410
1411         device->fs_devices->num_devices--;
1412
1413         if (device->missing)
1414                 root->fs_info->fs_devices->missing_devices--;
1415
1416         next_device = list_entry(root->fs_info->fs_devices->devices.next,
1417                                  struct btrfs_device, dev_list);
1418         if (device->bdev == root->fs_info->sb->s_bdev)
1419                 root->fs_info->sb->s_bdev = next_device->bdev;
1420         if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1421                 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1422
1423         if (device->bdev)
1424                 device->fs_devices->open_devices--;
1425
1426         call_rcu(&device->rcu, free_device);
1427         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1428
1429         num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1430         btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1431
1432         if (cur_devices->open_devices == 0) {
1433                 struct btrfs_fs_devices *fs_devices;
1434                 fs_devices = root->fs_info->fs_devices;
1435                 while (fs_devices) {
1436                         if (fs_devices->seed == cur_devices)
1437                                 break;
1438                         fs_devices = fs_devices->seed;
1439                 }
1440                 fs_devices->seed = cur_devices->seed;
1441                 cur_devices->seed = NULL;
1442                 lock_chunks(root);
1443                 __btrfs_close_devices(cur_devices);
1444                 unlock_chunks(root);
1445                 free_fs_devices(cur_devices);
1446         }
1447
1448         /*
1449          * at this point, the device is zero sized.  We want to
1450          * remove it from the devices list and zero out the old super
1451          */
1452         if (clear_super) {
1453                 /* make sure this device isn't detected as part of
1454                  * the FS anymore
1455                  */
1456                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1457                 set_buffer_dirty(bh);
1458                 sync_dirty_buffer(bh);
1459         }
1460
1461         ret = 0;
1462
1463 error_brelse:
1464         brelse(bh);
1465 error_close:
1466         if (bdev)
1467                 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1468 out:
1469         mutex_unlock(&uuid_mutex);
1470         return ret;
1471 error_undo:
1472         if (device->writeable) {
1473                 lock_chunks(root);
1474                 list_add(&device->dev_alloc_list,
1475                          &root->fs_info->fs_devices->alloc_list);
1476                 unlock_chunks(root);
1477                 root->fs_info->fs_devices->rw_devices++;
1478         }
1479         goto error_brelse;
1480 }
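
/*
 * Userspace reaches btrfs_rm_device() through the BTRFS_IOC_RM_DEV
 * ioctl on an open file descriptor of the mount point.  A minimal
 * sketch of a caller; it assumes the vol-args definitions from this
 * tree's fs/btrfs/ioctl.h are visible to the program (btrfs-progs
 * ships a copy of that header):
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int remove_device(const char *mnt, const char *dev_path)
{
        struct btrfs_ioctl_vol_args args;       /* fs/btrfs/ioctl.h */
        int fd = open(mnt, O_RDONLY);
        int ret;

        if (fd < 0)
                return -1;
        memset(&args, 0, sizeof(args));
        /* passing "missing" instead of a path picks a missing device */
        strncpy(args.name, dev_path, BTRFS_PATH_NAME_MAX);
        ret = ioctl(fd, BTRFS_IOC_RM_DEV, &args);
        close(fd);
        return ret;
}
#endif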
1481
1482 /*
1483  * does all the dirty work required for changing the file system's UUID.
1484  */
1485 static int btrfs_prepare_sprout(struct btrfs_root *root)
1486 {
1487         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1488         struct btrfs_fs_devices *old_devices;
1489         struct btrfs_fs_devices *seed_devices;
1490         struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1491         struct btrfs_device *device;
1492         u64 super_flags;
1493
1494         BUG_ON(!mutex_is_locked(&uuid_mutex));
1495         if (!fs_devices->seeding)
1496                 return -EINVAL;
1497
1498         seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1499         if (!seed_devices)
1500                 return -ENOMEM;
1501
1502         old_devices = clone_fs_devices(fs_devices);
1503         if (IS_ERR(old_devices)) {
1504                 kfree(seed_devices);
1505                 return PTR_ERR(old_devices);
1506         }
1507
1508         list_add(&old_devices->list, &fs_uuids);
1509
1510         memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1511         seed_devices->opened = 1;
1512         INIT_LIST_HEAD(&seed_devices->devices);
1513         INIT_LIST_HEAD(&seed_devices->alloc_list);
1514         mutex_init(&seed_devices->device_list_mutex);
1515
1516         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1517         list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1518                               synchronize_rcu);
1519         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1520
1521         list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1522         list_for_each_entry(device, &seed_devices->devices, dev_list) {
1523                 device->fs_devices = seed_devices;
1524         }
1525
1526         fs_devices->seeding = 0;
1527         fs_devices->num_devices = 0;
1528         fs_devices->open_devices = 0;
1529         fs_devices->seed = seed_devices;
1530
1531         generate_random_uuid(fs_devices->fsid);
1532         memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1533         memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1534         super_flags = btrfs_super_flags(disk_super) &
1535                       ~BTRFS_SUPER_FLAG_SEEDING;
1536         btrfs_set_super_flags(disk_super, super_flags);
1537
1538         return 0;
1539 }
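
/*
 * State after a successful btrfs_prepare_sprout(), before the new
 * device is added: the old devices now hang off seed_devices (which
 * kept the old fsid via the memcpy above), while fs_devices describes
 * an empty, writable filesystem under a freshly generated fsid:
 *
 *   fs_devices (new fsid)             seed_devices (old fsid)
 *     seeding      = 0                  seeding      = 1
 *     num_devices  = 0                  num_devices  = old count
 *     open_devices = 0                  open_devices = old count
 *     seed ----------------------------^ (chained via ->seed)
 *
 * Every old device's ->fs_devices pointer was repointed at
 * seed_devices by the loop above.
 */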
1540
1541 /*
1542  * store the expected generation for seed devices in device items.
1543  */
1544 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1545                                struct btrfs_root *root)
1546 {
1547         struct btrfs_path *path;
1548         struct extent_buffer *leaf;
1549         struct btrfs_dev_item *dev_item;
1550         struct btrfs_device *device;
1551         struct btrfs_key key;
1552         u8 fs_uuid[BTRFS_UUID_SIZE];
1553         u8 dev_uuid[BTRFS_UUID_SIZE];
1554         u64 devid;
1555         int ret;
1556
1557         path = btrfs_alloc_path();
1558         if (!path)
1559                 return -ENOMEM;
1560
1561         root = root->fs_info->chunk_root;
1562         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1563         key.offset = 0;
1564         key.type = BTRFS_DEV_ITEM_KEY;
1565
1566         while (1) {
1567                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1568                 if (ret < 0)
1569                         goto error;
1570
1571                 leaf = path->nodes[0];
1572 next_slot:
1573                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1574                         ret = btrfs_next_leaf(root, path);
1575                         if (ret > 0)
1576                                 break;
1577                         if (ret < 0)
1578                                 goto error;
1579                         leaf = path->nodes[0];
1580                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1581                         btrfs_release_path(path);
1582                         continue;
1583                 }
1584
1585                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1586                 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1587                     key.type != BTRFS_DEV_ITEM_KEY)
1588                         break;
1589
1590                 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1591                                           struct btrfs_dev_item);
1592                 devid = btrfs_device_id(leaf, dev_item);
1593                 read_extent_buffer(leaf, dev_uuid,
1594                                    (unsigned long)btrfs_device_uuid(dev_item),
1595                                    BTRFS_UUID_SIZE);
1596                 read_extent_buffer(leaf, fs_uuid,
1597                                    (unsigned long)btrfs_device_fsid(dev_item),
1598                                    BTRFS_UUID_SIZE);
1599                 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1600                 BUG_ON(!device);
1601
1602                 if (device->fs_devices->seeding) {
1603                         btrfs_set_device_generation(leaf, dev_item,
1604                                                     device->generation);
1605                         btrfs_mark_buffer_dirty(leaf);
1606                 }
1607
1608                 path->slots[0]++;
1609                 goto next_slot;
1610         }
1611         ret = 0;
1612 error:
1613         btrfs_free_path(path);
1614         return ret;
1615 }
1616
1617 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1618 {
1619         struct request_queue *q;
1620         struct btrfs_trans_handle *trans;
1621         struct btrfs_device *device;
1622         struct block_device *bdev;
1623         struct list_head *devices;
1624         struct super_block *sb = root->fs_info->sb;
1625         u64 total_bytes;
1626         int seeding_dev = 0;
1627         int ret = 0;
1628
1629         if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1630                 return -EINVAL;
1631
1632         bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1633                                   root->fs_info->bdev_holder);
1634         if (IS_ERR(bdev))
1635                 return PTR_ERR(bdev);
1636
1637         if (root->fs_info->fs_devices->seeding) {
1638                 seeding_dev = 1;
1639                 down_write(&sb->s_umount);
1640                 mutex_lock(&uuid_mutex);
1641         }
1642
1643         filemap_write_and_wait(bdev->bd_inode->i_mapping);
1644
1645         devices = &root->fs_info->fs_devices->devices;
1646         /*
1647          * we have the volume lock, so we don't need the extra
1648          * device list mutex while reading the list here.
1649          */
1650         list_for_each_entry(device, devices, dev_list) {
1651                 if (device->bdev == bdev) {
1652                         ret = -EEXIST;
1653                         goto error;
1654                 }
1655         }
1656
1657         device = kzalloc(sizeof(*device), GFP_NOFS);
1658         if (!device) {
1659                 /* we can safely leave the fs_devices entry around */
1660                 ret = -ENOMEM;
1661                 goto error;
1662         }
1663
1664         device->name = kstrdup(device_path, GFP_NOFS);
1665         if (!device->name) {
1666                 kfree(device);
1667                 ret = -ENOMEM;
1668                 goto error;
1669         }
1670
1671         ret = find_next_devid(root, &device->devid);
1672         if (ret) {
1673                 kfree(device->name);
1674                 kfree(device);
1675                 goto error;
1676         }
1677
1678         trans = btrfs_start_transaction(root, 0);
1679         if (IS_ERR(trans)) {
1680                 kfree(device->name);
1681                 kfree(device);
1682                 ret = PTR_ERR(trans);
1683                 goto error;
1684         }
1685
1686         lock_chunks(root);
1687
1688         q = bdev_get_queue(bdev);
1689         if (blk_queue_discard(q))
1690                 device->can_discard = 1;
1691         device->writeable = 1;
1692         device->work.func = pending_bios_fn;
1693         generate_random_uuid(device->uuid);
1694         spin_lock_init(&device->io_lock);
1695         device->generation = trans->transid;
1696         device->io_width = root->sectorsize;
1697         device->io_align = root->sectorsize;
1698         device->sector_size = root->sectorsize;
1699         device->total_bytes = i_size_read(bdev->bd_inode);
1700         device->disk_total_bytes = device->total_bytes;
1701         device->dev_root = root->fs_info->dev_root;
1702         device->bdev = bdev;
1703         device->in_fs_metadata = 1;
1704         device->mode = FMODE_EXCL;
1705         set_blocksize(device->bdev, 4096);
1706
1707         if (seeding_dev) {
1708                 sb->s_flags &= ~MS_RDONLY;
1709                 ret = btrfs_prepare_sprout(root);
1710                 BUG_ON(ret);
1711         }
1712
1713         device->fs_devices = root->fs_info->fs_devices;
1714
1715         /*
1716          * we don't want write_supers to jump in here with our device
1717          * half setup
1718          */
1719         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1720         list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
1721         list_add(&device->dev_alloc_list,
1722                  &root->fs_info->fs_devices->alloc_list);
1723         root->fs_info->fs_devices->num_devices++;
1724         root->fs_info->fs_devices->open_devices++;
1725         root->fs_info->fs_devices->rw_devices++;
1726         if (device->can_discard)
1727                 root->fs_info->fs_devices->num_can_discard++;
1728         root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1729
1730         spin_lock(&root->fs_info->free_chunk_lock);
1731         root->fs_info->free_chunk_space += device->total_bytes;
1732         spin_unlock(&root->fs_info->free_chunk_lock);
1733
1734         if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1735                 root->fs_info->fs_devices->rotating = 1;
1736
1737         total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
1738         btrfs_set_super_total_bytes(root->fs_info->super_copy,
1739                                     total_bytes + device->total_bytes);
1740
1741         total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
1742         btrfs_set_super_num_devices(root->fs_info->super_copy,
1743                                     total_bytes + 1);
1744         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1745
1746         if (seeding_dev) {
1747                 ret = init_first_rw_device(trans, root, device);
1748                 BUG_ON(ret);
1749                 ret = btrfs_finish_sprout(trans, root);
1750                 BUG_ON(ret);
1751         } else {
1752                 ret = btrfs_add_device(trans, root, device);
1753         }
1754
1755         /*
1756          * we've got more storage, clear any full flags on the space
1757          * infos
1758          */
1759         btrfs_clear_space_info_full(root->fs_info);
1760
1761         unlock_chunks(root);
1762         btrfs_commit_transaction(trans, root);
1763
1764         if (seeding_dev) {
1765                 mutex_unlock(&uuid_mutex);
1766                 up_write(&sb->s_umount);
1767
1768                 ret = btrfs_relocate_sys_chunks(root);
1769                 BUG_ON(ret);
1770         }
1771
1772         return ret;
1773 error:
1774         blkdev_put(bdev, FMODE_EXCL);
1775         if (seeding_dev) {
1776                 mutex_unlock(&uuid_mutex);
1777                 up_write(&sb->s_umount);
1778         }
1779         return ret;
1780 }
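
/*
 * The counterpart of device removal: BTRFS_IOC_ADD_DEV lands here.  A
 * sketch under the same header assumptions as the removal example
 * above:
 */
#if 0
static int add_device(const char *mnt, const char *dev_path)
{
        struct btrfs_ioctl_vol_args args;
        int fd = open(mnt, O_RDONLY);
        int ret;

        if (fd < 0)
                return -1;
        memset(&args, 0, sizeof(args));
        strncpy(args.name, dev_path, BTRFS_PATH_NAME_MAX);
        ret = ioctl(fd, BTRFS_IOC_ADD_DEV, &args);
        close(fd);
        return ret;
}
#endif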
1781
1782 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1783                                         struct btrfs_device *device)
1784 {
1785         int ret;
1786         struct btrfs_path *path;
1787         struct btrfs_root *root;
1788         struct btrfs_dev_item *dev_item;
1789         struct extent_buffer *leaf;
1790         struct btrfs_key key;
1791
1792         root = device->dev_root->fs_info->chunk_root;
1793
1794         path = btrfs_alloc_path();
1795         if (!path)
1796                 return -ENOMEM;
1797
1798         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1799         key.type = BTRFS_DEV_ITEM_KEY;
1800         key.offset = device->devid;
1801
1802         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1803         if (ret < 0)
1804                 goto out;
1805
1806         if (ret > 0) {
1807                 ret = -ENOENT;
1808                 goto out;
1809         }
1810
1811         leaf = path->nodes[0];
1812         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1813
1814         btrfs_set_device_id(leaf, dev_item, device->devid);
1815         btrfs_set_device_type(leaf, dev_item, device->type);
1816         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1817         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1818         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1819         btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
1820         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1821         btrfs_mark_buffer_dirty(leaf);
1822
1823 out:
1824         btrfs_free_path(path);
1825         return ret;
1826 }
1827
1828 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1829                       struct btrfs_device *device, u64 new_size)
1830 {
1831         struct btrfs_super_block *super_copy =
1832                 device->dev_root->fs_info->super_copy;
1833         u64 old_total = btrfs_super_total_bytes(super_copy);
1834         u64 diff = new_size - device->total_bytes;
1835
1836         if (!device->writeable)
1837                 return -EACCES;
1838         if (new_size <= device->total_bytes)
1839                 return -EINVAL;
1840
1841         btrfs_set_super_total_bytes(super_copy, old_total + diff);
1842         device->fs_devices->total_rw_bytes += diff;
1843
1844         device->total_bytes = new_size;
1845         device->disk_total_bytes = new_size;
1846         btrfs_clear_space_info_full(device->dev_root->fs_info);
1847
1848         return btrfs_update_device(trans, device);
1849 }
1850
1851 int btrfs_grow_device(struct btrfs_trans_handle *trans,
1852                       struct btrfs_device *device, u64 new_size)
1853 {
1854         int ret;
1855         lock_chunks(device->dev_root);
1856         ret = __btrfs_grow_device(trans, device, new_size);
1857         unlock_chunks(device->dev_root);
1858         return ret;
1859 }
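
/*
 * Growing is driven from the BTRFS_IOC_RESIZE ioctl, whose argument
 * string has the form "[devid:][+|-]size" or "[devid:]max"; e.g.
 * "2:+1g" grows device 2 by 1GiB and ends up here via
 * btrfs_grow_device().  A sketch, same header assumptions as the
 * examples above:
 */
#if 0
        struct btrfs_ioctl_vol_args args;

        memset(&args, 0, sizeof(args));
        strncpy(args.name, "2:max", BTRFS_PATH_NAME_MAX); /* grow devid 2 */
        ioctl(fd, BTRFS_IOC_RESIZE, &args);     /* fd: open mount point */
#endif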
1860
1861 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1862                             struct btrfs_root *root,
1863                             u64 chunk_tree, u64 chunk_objectid,
1864                             u64 chunk_offset)
1865 {
1866         int ret;
1867         struct btrfs_path *path;
1868         struct btrfs_key key;
1869
1870         root = root->fs_info->chunk_root;
1871         path = btrfs_alloc_path();
1872         if (!path)
1873                 return -ENOMEM;
1874
1875         key.objectid = chunk_objectid;
1876         key.offset = chunk_offset;
1877         key.type = BTRFS_CHUNK_ITEM_KEY;
1878
1879         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1880         BUG_ON(ret);
1881
1882         ret = btrfs_del_item(trans, root, path);
1883
1884         btrfs_free_path(path);
1885         return ret;
1886 }
1887
1888 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
1889                         chunk_offset)
1890 {
1891         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
1892         struct btrfs_disk_key *disk_key;
1893         struct btrfs_chunk *chunk;
1894         u8 *ptr;
1895         int ret = 0;
1896         u32 num_stripes;
1897         u32 array_size;
1898         u32 len = 0;
1899         u32 cur;
1900         struct btrfs_key key;
1901
1902         array_size = btrfs_super_sys_array_size(super_copy);
1903
1904         ptr = super_copy->sys_chunk_array;
1905         cur = 0;
1906
1907         while (cur < array_size) {
1908                 disk_key = (struct btrfs_disk_key *)ptr;
1909                 btrfs_disk_key_to_cpu(&key, disk_key);
1910
1911                 len = sizeof(*disk_key);
1912
1913                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1914                         chunk = (struct btrfs_chunk *)(ptr + len);
1915                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1916                         len += btrfs_chunk_item_size(num_stripes);
1917                 } else {
1918                         ret = -EIO;
1919                         break;
1920                 }
1921                 if (key.objectid == chunk_objectid &&
1922                     key.offset == chunk_offset) {
1923                         memmove(ptr, ptr + len, array_size - (cur + len));
1924                         array_size -= len;
1925                         btrfs_set_super_sys_array_size(super_copy, array_size);
1926                 } else {
1927                         ptr += len;
1928                         cur += len;
1929                 }
1930         }
1931         return ret;
1932 }
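
/*
 * Layout assumed by the parser above: sys_chunk_array packs
 * (key, chunk) pairs back to back, each chunk followed inline by its
 * num_stripes stripes:
 *
 *   [btrfs_disk_key][btrfs_chunk + stripes][btrfs_disk_key][...]
 *    \__ len = sizeof(*disk_key) + btrfs_chunk_item_size(N) __/
 *
 * Deleting an entry is therefore a memmove() of everything behind it,
 * followed by shrinking sys_array_size in the superblock copy.
 */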
1933
1934 static int btrfs_relocate_chunk(struct btrfs_root *root,
1935                          u64 chunk_tree, u64 chunk_objectid,
1936                          u64 chunk_offset)
1937 {
1938         struct extent_map_tree *em_tree;
1939         struct btrfs_root *extent_root;
1940         struct btrfs_trans_handle *trans;
1941         struct extent_map *em;
1942         struct map_lookup *map;
1943         int ret;
1944         int i;
1945
1946         root = root->fs_info->chunk_root;
1947         extent_root = root->fs_info->extent_root;
1948         em_tree = &root->fs_info->mapping_tree.map_tree;
1949
1950         ret = btrfs_can_relocate(extent_root, chunk_offset);
1951         if (ret)
1952                 return -ENOSPC;
1953
1954         /* step one, relocate all the extents inside this chunk */
1955         ret = btrfs_relocate_block_group(extent_root, chunk_offset);
1956         if (ret)
1957                 return ret;
1958
1959         trans = btrfs_start_transaction(root, 0);
1960         BUG_ON(IS_ERR(trans));
1961
1962         lock_chunks(root);
1963
1964         /*
1965          * step two, delete the device extents and the
1966          * chunk tree entries
1967          */
1968         read_lock(&em_tree->lock);
1969         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1970         read_unlock(&em_tree->lock);
1971
1972         BUG_ON(!em || em->start > chunk_offset ||
1973                em->start + em->len < chunk_offset);
1974         map = (struct map_lookup *)em->bdev;
1975
1976         for (i = 0; i < map->num_stripes; i++) {
1977                 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
1978                                             map->stripes[i].physical);
1979                 BUG_ON(ret);
1980
1981                 if (map->stripes[i].dev) {
1982                         ret = btrfs_update_device(trans, map->stripes[i].dev);
1983                         BUG_ON(ret);
1984                 }
1985         }
1986         ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
1987                                chunk_offset);
1988
1989         BUG_ON(ret);
1990
1991         trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
1992
1993         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
1994                 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
1995                 BUG_ON(ret);
1996         }
1997
1998         ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
1999         BUG_ON(ret);
2000
2001         write_lock(&em_tree->lock);
2002         remove_extent_mapping(em_tree, em);
2003         write_unlock(&em_tree->lock);
2004
2005         kfree(map);
2006         em->bdev = NULL;
2007
2008         /* once for the tree */
2009         free_extent_map(em);
2010         /* once for us */
2011         free_extent_map(em);
2012
2013         unlock_chunks(root);
2014         btrfs_end_transaction(trans, root);
2015         return 0;
2016 }
2017
2018 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2019 {
2020         struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2021         struct btrfs_path *path;
2022         struct extent_buffer *leaf;
2023         struct btrfs_chunk *chunk;
2024         struct btrfs_key key;
2025         struct btrfs_key found_key;
2026         u64 chunk_tree = chunk_root->root_key.objectid;
2027         u64 chunk_type;
2028         bool retried = false;
2029         int failed = 0;
2030         int ret;
2031
2032         path = btrfs_alloc_path();
2033         if (!path)
2034                 return -ENOMEM;
2035
2036 again:
2037         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2038         key.offset = (u64)-1;
2039         key.type = BTRFS_CHUNK_ITEM_KEY;
2040
2041         while (1) {
2042                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2043                 if (ret < 0)
2044                         goto error;
2045                 BUG_ON(ret == 0);
2046
2047                 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2048                                           key.type);
2049                 if (ret < 0)
2050                         goto error;
2051                 if (ret > 0)
2052                         break;
2053
2054                 leaf = path->nodes[0];
2055                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2056
2057                 chunk = btrfs_item_ptr(leaf, path->slots[0],
2058                                        struct btrfs_chunk);
2059                 chunk_type = btrfs_chunk_type(leaf, chunk);
2060                 btrfs_release_path(path);
2061
2062                 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2063                         ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2064                                                    found_key.objectid,
2065                                                    found_key.offset);
2066                         if (ret == -ENOSPC)
2067                                 failed++;
2068                         else if (ret)
2069                                 BUG();
2070                 }
2071
2072                 if (found_key.offset == 0)
2073                         break;
2074                 key.offset = found_key.offset - 1;
2075         }
2076         ret = 0;
2077         if (failed && !retried) {
2078                 failed = 0;
2079                 retried = true;
2080                 goto again;
2081         } else if (failed && retried) {
2082                 WARN_ON(1);
2083                 ret = -ENOSPC;
2084         }
2085 error:
2086         btrfs_free_path(path);
2087         return ret;
2088 }
2089
2090 static int insert_balance_item(struct btrfs_root *root,
2091                                struct btrfs_balance_control *bctl)
2092 {
2093         struct btrfs_trans_handle *trans;
2094         struct btrfs_balance_item *item;
2095         struct btrfs_disk_balance_args disk_bargs;
2096         struct btrfs_path *path;
2097         struct extent_buffer *leaf;
2098         struct btrfs_key key;
2099         int ret, err;
2100
2101         path = btrfs_alloc_path();
2102         if (!path)
2103                 return -ENOMEM;
2104
2105         trans = btrfs_start_transaction(root, 0);
2106         if (IS_ERR(trans)) {
2107                 btrfs_free_path(path);
2108                 return PTR_ERR(trans);
2109         }
2110
2111         key.objectid = BTRFS_BALANCE_OBJECTID;
2112         key.type = BTRFS_BALANCE_ITEM_KEY;
2113         key.offset = 0;
2114
2115         ret = btrfs_insert_empty_item(trans, root, path, &key,
2116                                       sizeof(*item));
2117         if (ret)
2118                 goto out;
2119
2120         leaf = path->nodes[0];
2121         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2122
2123         memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2124
2125         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2126         btrfs_set_balance_data(leaf, item, &disk_bargs);
2127         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2128         btrfs_set_balance_meta(leaf, item, &disk_bargs);
2129         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2130         btrfs_set_balance_sys(leaf, item, &disk_bargs);
2131
2132         btrfs_set_balance_flags(leaf, item, bctl->flags);
2133
2134         btrfs_mark_buffer_dirty(leaf);
2135 out:
2136         btrfs_free_path(path);
2137         err = btrfs_commit_transaction(trans, root);
2138         if (err && !ret)
2139                 ret = err;
2140         return ret;
2141 }
2142
2143 static int del_balance_item(struct btrfs_root *root)
2144 {
2145         struct btrfs_trans_handle *trans;
2146         struct btrfs_path *path;
2147         struct btrfs_key key;
2148         int ret, err;
2149
2150         path = btrfs_alloc_path();
2151         if (!path)
2152                 return -ENOMEM;
2153
2154         trans = btrfs_start_transaction(root, 0);
2155         if (IS_ERR(trans)) {
2156                 btrfs_free_path(path);
2157                 return PTR_ERR(trans);
2158         }
2159
2160         key.objectid = BTRFS_BALANCE_OBJECTID;
2161         key.type = BTRFS_BALANCE_ITEM_KEY;
2162         key.offset = 0;
2163
2164         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2165         if (ret < 0)
2166                 goto out;
2167         if (ret > 0) {
2168                 ret = -ENOENT;
2169                 goto out;
2170         }
2171
2172         ret = btrfs_del_item(trans, root, path);
2173 out:
2174         btrfs_free_path(path);
2175         err = btrfs_commit_transaction(trans, root);
2176         if (err && !ret)
2177                 ret = err;
2178         return ret;
2179 }
2180
2181 /*
2182  * This is a heuristic used to reduce the number of chunks balanced on
2183  * resume after balance was interrupted.
2184  */
2185 static void update_balance_args(struct btrfs_balance_control *bctl)
2186 {
2187         /*
2188          * Turn on soft mode for chunk types that were being converted.
2189          */
2190         if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2191                 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2192         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2193                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2194         if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2195                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2196
2197         /*
2198          * Turn on usage filter if it is not already used.  The idea is
2199          * that chunks that we have already balanced should be
2200          * reasonably full.  Don't do it for chunks that are being
2201          * converted - that will keep us from relocating unconverted
2202          * (albeit full) chunks.
2203          */
2204         if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2205             !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2206                 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2207                 bctl->data.usage = 90;
2208         }
2209         if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2210             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2211                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2212                 bctl->sys.usage = 90;
2213         }
2214         if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2215             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2216                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2217                 bctl->meta.usage = 90;
2218         }
2219 }
2220
2221 /*
2222  * Should be called with both balance and volume mutexes held to
2223  * serialize other volume operations (add_dev/rm_dev/resize) with
2224  * restriper.  Same goes for unset_balance_control.
2225  */
2226 static void set_balance_control(struct btrfs_balance_control *bctl)
2227 {
2228         struct btrfs_fs_info *fs_info = bctl->fs_info;
2229
2230         BUG_ON(fs_info->balance_ctl);
2231
2232         spin_lock(&fs_info->balance_lock);
2233         fs_info->balance_ctl = bctl;
2234         spin_unlock(&fs_info->balance_lock);
2235 }
2236
2237 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2238 {
2239         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2240
2241         BUG_ON(!fs_info->balance_ctl);
2242
2243         spin_lock(&fs_info->balance_lock);
2244         fs_info->balance_ctl = NULL;
2245         spin_unlock(&fs_info->balance_lock);
2246
2247         kfree(bctl);
2248 }
2249
2250 /*
2251  * Balance filters.  Return 1 if chunk should be filtered out
2252  * (should not be balanced).
2253  */
2254 static int chunk_profiles_filter(u64 chunk_profile,
2255                                  struct btrfs_balance_args *bargs)
2256 {
2257         chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;
2258
2259         if (chunk_profile == 0)
2260                 chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2261
2262         if (bargs->profiles & chunk_profile)
2263                 return 0;
2264
2265         return 1;
2266 }
2267
2268 static u64 div_factor_fine(u64 num, int factor)
2269 {
2270         if (factor <= 0)
2271                 return 0;
2272         if (factor >= 100)
2273                 return num;
2274
2275         num *= factor;
2276         do_div(num, 100);
2277         return num;
2278 }
2279
2280 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2281                               struct btrfs_balance_args *bargs)
2282 {
2283         struct btrfs_block_group_cache *cache;
2284         u64 chunk_used, user_thresh;
2285         int ret = 1;
2286
2287         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2288         chunk_used = btrfs_block_group_used(&cache->item);
2289
2290         user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
2291         if (chunk_used < user_thresh)
2292                 ret = 0;
2293
2294         btrfs_put_block_group(cache);
2295         return ret;
2296 }
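
/*
 * Worked example: with bargs->usage == 90 and a 1GiB chunk,
 * user_thresh = div_factor_fine(1073741824, 90) == 966367641 bytes,
 * so the usage filter keeps a chunk eligible for relocation only
 * while it is less than ~90% full; fuller chunks are filtered out.
 */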
2297
2298 static int chunk_devid_filter(struct extent_buffer *leaf,
2299                               struct btrfs_chunk *chunk,
2300                               struct btrfs_balance_args *bargs)
2301 {
2302         struct btrfs_stripe *stripe;
2303         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2304         int i;
2305
2306         for (i = 0; i < num_stripes; i++) {
2307                 stripe = btrfs_stripe_nr(chunk, i);
2308                 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2309                         return 0;
2310         }
2311
2312         return 1;
2313 }
2314
2315 /* [pstart, pend) */
2316 static int chunk_drange_filter(struct extent_buffer *leaf,
2317                                struct btrfs_chunk *chunk,
2318                                u64 chunk_offset,
2319                                struct btrfs_balance_args *bargs)
2320 {
2321         struct btrfs_stripe *stripe;
2322         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2323         u64 stripe_offset;
2324         u64 stripe_length;
2325         int factor;
2326         int i;
2327
2328         if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2329                 return 0;
2330
2331         if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2332              BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
2333                 factor = 2;
2334         else
2335                 factor = 1;
2336         factor = num_stripes / factor;
2337
2338         for (i = 0; i < num_stripes; i++) {
2339                 stripe = btrfs_stripe_nr(chunk, i);
2340                 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2341                         continue;
2342
2343                 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2344                 stripe_length = btrfs_chunk_length(leaf, chunk);
2345                 do_div(stripe_length, factor);
2346
2347                 if (stripe_offset < bargs->pend &&
2348                     stripe_offset + stripe_length > bargs->pstart)
2349                         return 0;
2350         }
2351
2352         return 1;
2353 }
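
/*
 * Worked example for the factor logic above: a RAID1 chunk has
 * num_stripes == 2 and starts with factor == 2, so num_stripes/factor
 * leaves 1 and each stripe spans the full chunk length.  A two-stripe
 * RAID0 chunk keeps factor == 1, giving stripe_length ==
 * chunk_length / 2 on each device.  The [pstart, pend) test above
 * then checks the physical overlap of each matching stripe.
 */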
2354
2355 /* [vstart, vend) */
2356 static int chunk_vrange_filter(struct extent_buffer *leaf,
2357                                struct btrfs_chunk *chunk,
2358                                u64 chunk_offset,
2359                                struct btrfs_balance_args *bargs)
2360 {
2361         if (chunk_offset < bargs->vend &&
2362             chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2363                 /* at least part of the chunk is inside this vrange */
2364                 return 0;
2365
2366         return 1;
2367 }
2368
2369 static int chunk_soft_convert_filter(u64 chunk_profile,
2370                                      struct btrfs_balance_args *bargs)
2371 {
2372         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2373                 return 0;
2374
2375         chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;
2376
2377         if (chunk_profile == 0)
2378                 chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2379
2380         if (bargs->target & chunk_profile)
2381                 return 1;
2382
2383         return 0;
2384 }
2385
2386 static int should_balance_chunk(struct btrfs_root *root,
2387                                 struct extent_buffer *leaf,
2388                                 struct btrfs_chunk *chunk, u64 chunk_offset)
2389 {
2390         struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2391         struct btrfs_balance_args *bargs = NULL;
2392         u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2393
2394         /* type filter */
2395         if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2396               (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2397                 return 0;
2398         }
2399
2400         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2401                 bargs = &bctl->data;
2402         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2403                 bargs = &bctl->sys;
2404         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2405                 bargs = &bctl->meta;
2406
2407         /* profiles filter */
2408         if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2409             chunk_profiles_filter(chunk_type, bargs)) {
2410                 return 0;
2411         }
2412
2413         /* usage filter */
2414         if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2415             chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2416                 return 0;
2417         }
2418
2419         /* devid filter */
2420         if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2421             chunk_devid_filter(leaf, chunk, bargs)) {
2422                 return 0;
2423         }
2424
2425         /* drange filter, makes sense only with devid filter */
2426         if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2427             chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2428                 return 0;
2429         }
2430
2431         /* vrange filter */
2432         if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2433             chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2434                 return 0;
2435         }
2436
2437         /* soft profile changing mode */
2438         if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2439             chunk_soft_convert_filter(chunk_type, bargs)) {
2440                 return 0;
2441         }
2442
2443         return 1;
2444 }
2445
2446 static u64 div_factor(u64 num, int factor)
2447 {
2448         if (factor == 10)
2449                 return num;
2450         num *= factor;
2451         do_div(num, 10);
2452         return num;
2453 }
2454
2455 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2456 {
2457         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2458         struct btrfs_root *chunk_root = fs_info->chunk_root;
2459         struct btrfs_root *dev_root = fs_info->dev_root;
2460         struct list_head *devices;
2461         struct btrfs_device *device;
2462         u64 old_size;
2463         u64 size_to_free;
2464         struct btrfs_chunk *chunk;
2465         struct btrfs_path *path;
2466         struct btrfs_key key;
2467         struct btrfs_key found_key;
2468         struct btrfs_trans_handle *trans;
2469         struct extent_buffer *leaf;
2470         int slot;
2471         int ret;
2472         int enospc_errors = 0;
2473         bool counting = true;
2474
2475         /* step one make some room on all the devices */
2476         devices = &fs_info->fs_devices->devices;
2477         list_for_each_entry(device, devices, dev_list) {
2478                 old_size = device->total_bytes;
2479                 size_to_free = div_factor(old_size, 1);
2480                 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2481                 if (!device->writeable ||
2482                     device->total_bytes - device->bytes_used > size_to_free)
2483                         continue;
2484
2485                 ret = btrfs_shrink_device(device, old_size - size_to_free);
2486                 if (ret == -ENOSPC)
2487                         break;
2488                 BUG_ON(ret);
2489
2490                 trans = btrfs_start_transaction(dev_root, 0);
2491                 BUG_ON(IS_ERR(trans));
2492
2493                 ret = btrfs_grow_device(trans, device, old_size);
2494                 BUG_ON(ret);
2495
2496                 btrfs_end_transaction(trans, dev_root);
2497         }
2498
2499         /* step two, relocate all the chunks */
2500         path = btrfs_alloc_path();
2501         if (!path) {
2502                 ret = -ENOMEM;
2503                 goto error;
2504         }
2505
2506         /* zero out stat counters */
2507         spin_lock(&fs_info->balance_lock);
2508         memset(&bctl->stat, 0, sizeof(bctl->stat));
2509         spin_unlock(&fs_info->balance_lock);
2510 again:
2511         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2512         key.offset = (u64)-1;
2513         key.type = BTRFS_CHUNK_ITEM_KEY;
2514
2515         while (1) {
2516                 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2517                     atomic_read(&fs_info->balance_cancel_req)) {
2518                         ret = -ECANCELED;
2519                         goto error;
2520                 }
2521
2522                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2523                 if (ret < 0)
2524                         goto error;
2525
2526                 /*
2527                  * this shouldn't happen, it means the last relocate
2528                  * failed
2529                  */
2530                 if (ret == 0)
2531                         BUG(); /* FIXME break ? */
2532
2533                 ret = btrfs_previous_item(chunk_root, path, 0,
2534                                           BTRFS_CHUNK_ITEM_KEY);
2535                 if (ret) {
2536                         ret = 0;
2537                         break;
2538                 }
2539
2540                 leaf = path->nodes[0];
2541                 slot = path->slots[0];
2542                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2543
2544                 if (found_key.objectid != key.objectid)
2545                         break;
2546
2547                 /* chunk zero is special */
2548                 if (found_key.offset == 0)
2549                         break;
2550
2551                 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2552
2553                 if (!counting) {
2554                         spin_lock(&fs_info->balance_lock);
2555                         bctl->stat.considered++;
2556                         spin_unlock(&fs_info->balance_lock);
2557                 }
2558
2559                 ret = should_balance_chunk(chunk_root, leaf, chunk,
2560                                            found_key.offset);
2561                 btrfs_release_path(path);
2562                 if (!ret)
2563                         goto loop;
2564
2565                 if (counting) {
2566                         spin_lock(&fs_info->balance_lock);
2567                         bctl->stat.expected++;
2568                         spin_unlock(&fs_info->balance_lock);
2569                         goto loop;
2570                 }
2571
2572                 ret = btrfs_relocate_chunk(chunk_root,
2573                                            chunk_root->root_key.objectid,
2574                                            found_key.objectid,
2575                                            found_key.offset);
2576                 if (ret && ret != -ENOSPC)
2577                         goto error;
2578                 if (ret == -ENOSPC) {
2579                         enospc_errors++;
2580                 } else {
2581                         spin_lock(&fs_info->balance_lock);
2582                         bctl->stat.completed++;
2583                         spin_unlock(&fs_info->balance_lock);
2584                 }
2585 loop:
2586                 key.offset = found_key.offset - 1;
2587         }
2588
2589         if (counting) {
2590                 btrfs_release_path(path);
2591                 counting = false;
2592                 goto again;
2593         }
2594 error:
2595         btrfs_free_path(path);
2596         if (enospc_errors) {
2597                 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2598                        enospc_errors);
2599                 if (!ret)
2600                         ret = -ENOSPC;
2601         }
2602
2603         return ret;
2604 }
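
/*
 * Note the two passes above: while counting == true the loop only
 * applies the filters and bumps bctl->stat.expected; the second pass
 * does the actual relocation and bumps stat.considered and
 * stat.completed.  That is what lets update_ioctl_balance_args()
 * report completed-vs-expected progress to userspace while the second
 * pass is still walking the chunk tree.
 */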
2605
2606 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2607 {
2608         /* cancel requested || normal exit path */
2609         return atomic_read(&fs_info->balance_cancel_req) ||
2610                 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2611                  atomic_read(&fs_info->balance_cancel_req) == 0);
2612 }
2613
2614 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2615 {
2616         int ret;
2617
2618         unset_balance_control(fs_info);
2619         ret = del_balance_item(fs_info->tree_root);
2620         BUG_ON(ret);
2621 }
2622
2623 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2624                                struct btrfs_ioctl_balance_args *bargs);
2625
2626 /*
2627  * Should be called with both balance and volume mutexes held
2628  */
2629 int btrfs_balance(struct btrfs_balance_control *bctl,
2630                   struct btrfs_ioctl_balance_args *bargs)
2631 {
2632         struct btrfs_fs_info *fs_info = bctl->fs_info;
2633         u64 allowed;
2634         int ret;
2635
2636         if (btrfs_fs_closing(fs_info) ||
2637             atomic_read(&fs_info->balance_pause_req) ||
2638             atomic_read(&fs_info->balance_cancel_req)) {
2639                 ret = -EINVAL;
2640                 goto out;
2641         }
2642
2643         /*
2644          * In case of mixed groups both data and meta should be picked,
2645          * and identical options should be given for both of them.
2646          */
2647         allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2648         if ((allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2649             (bctl->flags & (BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA))) {
2650                 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2651                     !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2652                     memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2653                         printk(KERN_ERR "btrfs: with mixed groups data and "
2654                                "metadata balance options must be the same\n");
2655                         ret = -EINVAL;
2656                         goto out;
2657                 }
2658         }
2659
2660         /*
2661          * Profile changing sanity checks.  Skip them if a simple
2662          * balance is requested.
2663          */
2664         if (!((bctl->data.flags | bctl->sys.flags | bctl->meta.flags) &
2665               BTRFS_BALANCE_ARGS_CONVERT))
2666                 goto do_balance;
2667
2668         allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2669         if (fs_info->fs_devices->num_devices == 1)
2670                 allowed |= BTRFS_BLOCK_GROUP_DUP;
2671         else if (fs_info->fs_devices->num_devices < 4)
2672                 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2673         else
2674                 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2675                                 BTRFS_BLOCK_GROUP_RAID10);
2676
2677         if (!profile_is_valid(bctl->data.target, 1) ||
2678             bctl->data.target & ~allowed) {
2679                 printk(KERN_ERR "btrfs: unable to start balance with target "
2680                        "data profile %llu\n",
2681                        (unsigned long long)bctl->data.target);
2682                 ret = -EINVAL;
2683                 goto out;
2684         }
2685         if (!profile_is_valid(bctl->meta.target, 1) ||
2686             bctl->meta.target & ~allowed) {
2687                 printk(KERN_ERR "btrfs: unable to start balance with target "
2688                        "metadata profile %llu\n",
2689                        (unsigned long long)bctl->meta.target);
2690                 ret = -EINVAL;
2691                 goto out;
2692         }
2693         if (!profile_is_valid(bctl->sys.target, 1) ||
2694             bctl->sys.target & ~allowed) {
2695                 printk(KERN_ERR "btrfs: unable to start balance with target "
2696                        "system profile %llu\n",
2697                        (unsigned long long)bctl->sys.target);
2698                 ret = -EINVAL;
2699                 goto out;
2700         }
2701
2702         if (bctl->data.target & BTRFS_BLOCK_GROUP_DUP) {
2703                 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2704                 ret = -EINVAL;
2705                 goto out;
2706         }
2707
2708         /* allow to reduce meta or sys integrity only if force set */
2709         allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2710                         BTRFS_BLOCK_GROUP_RAID10;
2711         if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2712              (fs_info->avail_system_alloc_bits & allowed) &&
2713              !(bctl->sys.target & allowed)) ||
2714             ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2715              (fs_info->avail_metadata_alloc_bits & allowed) &&
2716              !(bctl->meta.target & allowed))) {
2717                 if (bctl->flags & BTRFS_BALANCE_FORCE) {
2718                         printk(KERN_INFO "btrfs: force reducing metadata "
2719                                "integrity\n");
2720                 } else {
2721                         printk(KERN_ERR "btrfs: balance will reduce metadata "
2722                                "integrity, use force if you want this\n");
2723                         ret = -EINVAL;
2724                         goto out;
2725                 }
2726         }
2727
2728 do_balance:
2729         ret = insert_balance_item(fs_info->tree_root, bctl);
2730         if (ret && ret != -EEXIST)
2731                 goto out;
2732
2733         if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2734                 BUG_ON(ret == -EEXIST);
2735                 set_balance_control(bctl);
2736         } else {
2737                 BUG_ON(ret != -EEXIST);
2738                 spin_lock(&fs_info->balance_lock);
2739                 update_balance_args(bctl);
2740                 spin_unlock(&fs_info->balance_lock);
2741         }
2742
2743         atomic_inc(&fs_info->balance_running);
2744         mutex_unlock(&fs_info->balance_mutex);
2745
2746         ret = __btrfs_balance(fs_info);
2747
2748         mutex_lock(&fs_info->balance_mutex);
2749         atomic_dec(&fs_info->balance_running);
2750
2751         if (bargs) {
2752                 memset(bargs, 0, sizeof(*bargs));
2753                 update_ioctl_balance_args(fs_info, 0, bargs);
2754         }
2755
2756         if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2757             balance_need_close(fs_info)) {
2758                 __cancel_balance(fs_info);
2759         }
2760
2761         wake_up(&fs_info->balance_wait_q);
2762
2763         return ret;
2764 out:
2765         if (bctl->flags & BTRFS_BALANCE_RESUME)
2766                 __cancel_balance(fs_info);
2767         else
2768                 kfree(bctl);
2769         return ret;
2770 }
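
/*
 * btrfs_balance() is reached from the BTRFS_IOC_BALANCE_V2 ioctl.  A
 * minimal sketch that balances all three chunk types and restricts
 * data chunks to those below 50% usage (struct and flag names from
 * this tree's fs/btrfs/ioctl.h and ctree.h):
 */
#if 0
        struct btrfs_ioctl_balance_args args;

        memset(&args, 0, sizeof(args));
        args.flags = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA |
                     BTRFS_BALANCE_SYSTEM;
        args.data.flags |= BTRFS_BALANCE_ARGS_USAGE;
        args.data.usage = 50;
        ioctl(fd, BTRFS_IOC_BALANCE_V2, &args); /* fd: open mount point */
#endif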
2771
2772 static int balance_kthread(void *data)
2773 {
2774         struct btrfs_balance_control *bctl =
2775                         (struct btrfs_balance_control *)data;
2776         struct btrfs_fs_info *fs_info = bctl->fs_info;
2777         int ret = 0;
2778
2779         mutex_lock(&fs_info->volume_mutex);
2780         mutex_lock(&fs_info->balance_mutex);
2781
2782         set_balance_control(bctl);
2783
2784         if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2785                 printk(KERN_INFO "btrfs: force skipping balance\n");
2786         } else {
2787                 printk(KERN_INFO "btrfs: continuing balance\n");
2788                 ret = btrfs_balance(bctl, NULL);
2789         }
2790
2791         mutex_unlock(&fs_info->balance_mutex);
2792         mutex_unlock(&fs_info->volume_mutex);
2793         return ret;
2794 }
2795
2796 int btrfs_recover_balance(struct btrfs_root *tree_root)
2797 {
2798         struct task_struct *tsk;
2799         struct btrfs_balance_control *bctl;
2800         struct btrfs_balance_item *item;
2801         struct btrfs_disk_balance_args disk_bargs;
2802         struct btrfs_path *path;
2803         struct extent_buffer *leaf;
2804         struct btrfs_key key;
2805         int ret;
2806
2807         path = btrfs_alloc_path();
2808         if (!path)
2809                 return -ENOMEM;
2810
2811         bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
2812         if (!bctl) {
2813                 ret = -ENOMEM;
2814                 goto out;
2815         }
2816
2817         key.objectid = BTRFS_BALANCE_OBJECTID;
2818         key.type = BTRFS_BALANCE_ITEM_KEY;
2819         key.offset = 0;
2820
2821         ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2822         if (ret < 0)
2823                 goto out_bctl;
2824         if (ret > 0) { /* ret = -ENOENT; */
2825                 ret = 0;
2826                 goto out_bctl;
2827         }
2828
2829         leaf = path->nodes[0];
2830         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2831
2832         bctl->fs_info = tree_root->fs_info;
2833         bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
2834
2835         btrfs_balance_data(leaf, item, &disk_bargs);
2836         btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
2837         btrfs_balance_meta(leaf, item, &disk_bargs);
2838         btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
2839         btrfs_balance_sys(leaf, item, &disk_bargs);
2840         btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
2841
2842         tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
2843         if (IS_ERR(tsk))
2844                 ret = PTR_ERR(tsk);
2845         else
2846                 goto out;
2847
2848 out_bctl:
2849         kfree(bctl);
2850 out:
2851         btrfs_free_path(path);
2852         return ret;
2853 }
2854
2855 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
2856 {
2857         int ret = 0;
2858
2859         mutex_lock(&fs_info->balance_mutex);
2860         if (!fs_info->balance_ctl) {
2861                 mutex_unlock(&fs_info->balance_mutex);
2862                 return -ENOTCONN;
2863         }
2864
2865         if (atomic_read(&fs_info->balance_running)) {
2866                 atomic_inc(&fs_info->balance_pause_req);
2867                 mutex_unlock(&fs_info->balance_mutex);
2868
2869                 wait_event(fs_info->balance_wait_q,
2870                            atomic_read(&fs_info->balance_running) == 0);
2871
2872                 mutex_lock(&fs_info->balance_mutex);
2873                 /* we are good with balance_ctl ripped off from under us */
2874                 BUG_ON(atomic_read(&fs_info->balance_running));
2875                 atomic_dec(&fs_info->balance_pause_req);
2876         } else {
2877                 ret = -ENOTCONN;
2878         }
2879
2880         mutex_unlock(&fs_info->balance_mutex);
2881         return ret;
2882 }
2883
2884 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
2885 {
2886         mutex_lock(&fs_info->balance_mutex);
2887         if (!fs_info->balance_ctl) {
2888                 mutex_unlock(&fs_info->balance_mutex);
2889                 return -ENOTCONN;
2890         }
2891
2892         atomic_inc(&fs_info->balance_cancel_req);
2893         /*
2894          * if we are running, just wait and return; the balance item
2895          * is deleted in btrfs_balance in this case
2896          */
2897         if (atomic_read(&fs_info->balance_running)) {
2898                 mutex_unlock(&fs_info->balance_mutex);
2899                 wait_event(fs_info->balance_wait_q,
2900                            atomic_read(&fs_info->balance_running) == 0);
2901                 mutex_lock(&fs_info->balance_mutex);
2902         } else {
2903                 /* __cancel_balance needs volume_mutex */
2904                 mutex_unlock(&fs_info->balance_mutex);
2905                 mutex_lock(&fs_info->volume_mutex);
2906                 mutex_lock(&fs_info->balance_mutex);
2907
2908                 if (fs_info->balance_ctl)
2909                         __cancel_balance(fs_info);
2910
2911                 mutex_unlock(&fs_info->volume_mutex);
2912         }
2913
2914         BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
2915         atomic_dec(&fs_info->balance_cancel_req);
2916         mutex_unlock(&fs_info->balance_mutex);
2917         return 0;
2918 }
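
/*
 * Pause and cancel map to the BTRFS_IOC_BALANCE_CTL ioctl, which takes
 * a plain command code rather than a struct:
 */
#if 0
        ioctl(fd, BTRFS_IOC_BALANCE_CTL, BTRFS_BALANCE_CTL_PAUSE);
        ioctl(fd, BTRFS_IOC_BALANCE_CTL, BTRFS_BALANCE_CTL_CANCEL);
#endif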
2919
2920 /*
2921  * shrinking a device means finding all of the device extents past
2922  * the new size, and then following the back refs to the chunks.
2923  * The chunk relocation code actually frees the device extents.
2924  */
2925 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
2926 {
2927         struct btrfs_trans_handle *trans;
2928         struct btrfs_root *root = device->dev_root;
2929         struct btrfs_dev_extent *dev_extent = NULL;
2930         struct btrfs_path *path;
2931         u64 length;
2932         u64 chunk_tree;
2933         u64 chunk_objectid;
2934         u64 chunk_offset;
2935         int ret;
2936         int slot;
2937         int failed = 0;
2938         bool retried = false;
2939         struct extent_buffer *l;
2940         struct btrfs_key key;
2941         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2942         u64 old_total = btrfs_super_total_bytes(super_copy);
2943         u64 old_size = device->total_bytes;
2944         u64 diff = device->total_bytes - new_size;
2945
2946         if (new_size >= device->total_bytes)
2947                 return -EINVAL;
2948
2949         path = btrfs_alloc_path();
2950         if (!path)
2951                 return -ENOMEM;
2952
2953         path->reada = 2;
2954
2955         lock_chunks(root);
2956
2957         device->total_bytes = new_size;
2958         if (device->writeable) {
2959                 device->fs_devices->total_rw_bytes -= diff;
2960                 spin_lock(&root->fs_info->free_chunk_lock);
2961                 root->fs_info->free_chunk_space -= diff;
2962                 spin_unlock(&root->fs_info->free_chunk_lock);
2963         }
2964         unlock_chunks(root);
2965
2966 again:
2967         key.objectid = device->devid;
2968         key.offset = (u64)-1;
2969         key.type = BTRFS_DEV_EXTENT_KEY;
2970
2971         while (1) {
2972                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2973                 if (ret < 0)
2974                         goto done;
2975
2976                 ret = btrfs_previous_item(root, path, 0, key.type);
2977                 if (ret < 0)
2978                         goto done;
2979                 if (ret) {
2980                         ret = 0;
2981                         btrfs_release_path(path);
2982                         break;
2983                 }
2984
2985                 l = path->nodes[0];
2986                 slot = path->slots[0];
2987                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
2988
2989                 if (key.objectid != device->devid) {
2990                         btrfs_release_path(path);
2991                         break;
2992                 }
2993
2994                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2995                 length = btrfs_dev_extent_length(l, dev_extent);
2996
2997                 if (key.offset + length <= new_size) {
2998                         btrfs_release_path(path);
2999                         break;
3000                 }
3001
3002                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3003                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3004                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3005                 btrfs_release_path(path);
3006
3007                 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3008                                            chunk_offset);
3009                 if (ret && ret != -ENOSPC)
3010                         goto done;
3011                 if (ret == -ENOSPC)
3012                         failed++;
3013                 key.offset -= 1;
3014         }
3015
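        /*
         * Relocating extents in the first pass can itself free space
         * below new_size, so chunks that failed with -ENOSPC often
         * succeed on a second walk.  Only when a full retry pass still
         * fails do we roll the size change back below.
         */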
3016         if (failed && !retried) {
3017                 failed = 0;
3018                 retried = true;
3019                 goto again;
3020         } else if (failed && retried) {
3021                 ret = -ENOSPC;
3022                 lock_chunks(root);
3023
3024                 device->total_bytes = old_size;
3025                 if (device->writeable)
3026                         device->fs_devices->total_rw_bytes += diff;
3027                 spin_lock(&root->fs_info->free_chunk_lock);
3028                 root->fs_info->free_chunk_space += diff;
3029                 spin_unlock(&root->fs_info->free_chunk_lock);
3030                 unlock_chunks(root);
3031                 goto done;
3032         }
3033
3034         /* Shrinking succeeded, else we would be at "done". */
3035         trans = btrfs_start_transaction(root, 0);
3036         if (IS_ERR(trans)) {
3037                 ret = PTR_ERR(trans);
3038                 goto done;
3039         }
3040
3041         lock_chunks(root);
3042
3043         device->disk_total_bytes = new_size;
3044         /* Now btrfs_update_device() will change the on-disk size. */
3045         ret = btrfs_update_device(trans, device);
3046         if (ret) {
3047                 unlock_chunks(root);
3048                 btrfs_end_transaction(trans, root);
3049                 goto done;
3050         }
3051         WARN_ON(diff > old_total);
3052         btrfs_set_super_total_bytes(super_copy, old_total - diff);
3053         unlock_chunks(root);
3054         btrfs_end_transaction(trans, root);
3055 done:
3056         btrfs_free_path(path);
3057         return ret;
3058 }
3059
3060 static int btrfs_add_system_chunk(struct btrfs_root *root,
3061                            struct btrfs_key *key,
3062                            struct btrfs_chunk *chunk, int item_size)
3063 {
3064         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3065         struct btrfs_disk_key disk_key;
3066         u32 array_size;
3067         u8 *ptr;
3068
3069         array_size = btrfs_super_sys_array_size(super_copy);
3070         if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3071                 return -EFBIG;
3072
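        /*
         * Each entry in sys_chunk_array is a packed pair appended back
         * to back:
         *
         *   [struct btrfs_disk_key][chunk item incl. its stripes] ...
         *
         * btrfs_read_sys_array() walks the array with the same stride.
         */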
3073         ptr = super_copy->sys_chunk_array + array_size;
3074         btrfs_cpu_key_to_disk(&disk_key, key);
3075         memcpy(ptr, &disk_key, sizeof(disk_key));
3076         ptr += sizeof(disk_key);
3077         memcpy(ptr, chunk, item_size);
3078         item_size += sizeof(disk_key);
3079         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3080         return 0;
3081 }
3082
3083 /*
3084  * sort the devices in descending order by max_avail, total_avail
3085  */
3086 static int btrfs_cmp_device_info(const void *a, const void *b)
3087 {
3088         const struct btrfs_device_info *di_a = a;
3089         const struct btrfs_device_info *di_b = b;
3090
3091         if (di_a->max_avail > di_b->max_avail)
3092                 return -1;
3093         if (di_a->max_avail < di_b->max_avail)
3094                 return 1;
3095         if (di_a->total_avail > di_b->total_avail)
3096                 return -1;
3097         if (di_a->total_avail < di_b->total_avail)
3098                 return 1;
3099         return 0;
3100 }
3101
3102 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3103                                struct btrfs_root *extent_root,
3104                                struct map_lookup **map_ret,
3105                                u64 *num_bytes_out, u64 *stripe_size_out,
3106                                u64 start, u64 type)
3107 {
3108         struct btrfs_fs_info *info = extent_root->fs_info;
3109         struct btrfs_fs_devices *fs_devices = info->fs_devices;
3110         struct list_head *cur;
3111         struct map_lookup *map = NULL;
3112         struct extent_map_tree *em_tree;
3113         struct extent_map *em;
3114         struct btrfs_device_info *devices_info = NULL;
3115         u64 total_avail;
3116         int num_stripes;        /* total number of stripes to allocate */
3117         int sub_stripes;        /* sub_stripes info for map */
3118         int dev_stripes;        /* stripes per dev */
3119         int devs_max;           /* max devs to use */
3120         int devs_min;           /* min devs needed */
3121         int devs_increment;     /* ndevs has to be a multiple of this */
3122         int ncopies;            /* how many copies of the data */
3123         int ret;
3124         u64 max_stripe_size;
3125         u64 max_chunk_size;
3126         u64 stripe_size;
3127         u64 num_bytes;
3128         int ndevs;
3129         int i;
3130         int j;
3131
3132         if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
3133             (type & BTRFS_BLOCK_GROUP_DUP)) {
3134                 WARN_ON(1);
3135                 type &= ~BTRFS_BLOCK_GROUP_DUP;
3136         }
3137
3138         if (list_empty(&fs_devices->alloc_list))
3139                 return -ENOSPC;
3140
3141         sub_stripes = 1;
3142         dev_stripes = 1;
3143         devs_increment = 1;
3144         ncopies = 1;
3145         devs_max = 0;   /* 0 == as many as possible */
3146         devs_min = 1;
3147
3148         /*
3149          * define the properties of each RAID type.
3150          * FIXME: move this to a global table and use it in all RAID
3151          * calculation code
3152          */
3153         if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3154                 dev_stripes = 2;
3155                 ncopies = 2;
3156                 devs_max = 1;
3157         } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3158                 devs_min = 2;
3159         } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3160                 devs_increment = 2;
3161                 ncopies = 2;
3162                 devs_max = 2;
3163                 devs_min = 2;
3164         } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3165                 sub_stripes = 2;
3166                 devs_increment = 2;
3167                 ncopies = 2;
3168                 devs_min = 4;
3169         } else {
3170                 devs_max = 1;
3171         }
3172
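        /*
         * Parameters chosen above, per profile (devs_max == 0 means
         * "as many as possible"):
         *
         *           dev_str  sub_str  devs_inc  ncopies  devs_min  devs_max
         * DUP          2        1        1         2        1         1
         * RAID0        1        1        1         1        2         0
         * RAID1        1        1        2         2        2         2
         * RAID10       1        2        2         2        4         0
         * single       1        1        1         1        1         1
         */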
3173         if (type & BTRFS_BLOCK_GROUP_DATA) {
3174                 max_stripe_size = 1024 * 1024 * 1024;
3175                 max_chunk_size = 10 * max_stripe_size;
3176         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3177                 /* for larger filesystems, use larger metadata chunks */
3178                 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3179                         max_stripe_size = 1024 * 1024 * 1024;
3180                 else
3181                         max_stripe_size = 256 * 1024 * 1024;
3182                 max_chunk_size = max_stripe_size;
3183         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3184                 max_stripe_size = 32 * 1024 * 1024;
3185                 max_chunk_size = 2 * max_stripe_size;
3186         } else {
3187                 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3188                        type);
3189                 BUG_ON(1);
3190         }
3191
3192         /* we don't want a chunk larger than 10% of writeable space */
3193         max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3194                              max_chunk_size);
3195
3196         devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3197                                GFP_NOFS);
3198         if (!devices_info)
3199                 return -ENOMEM;
3200
3201         cur = fs_devices->alloc_list.next;
3202
3203         /*
3204          * in the first pass through the devices list, we gather information
3205          * about the available holes on each device.
3206          */
3207         ndevs = 0;
3208         while (cur != &fs_devices->alloc_list) {
3209                 struct btrfs_device *device;
3210                 u64 max_avail;
3211                 u64 dev_offset;
3212
3213                 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3214
3215                 cur = cur->next;
3216
3217                 if (!device->writeable) {
3218                         printk(KERN_ERR
3219                                "btrfs: read-only device in alloc_list\n");
3220                         WARN_ON(1);
3221                         continue;
3222                 }
3223
3224                 if (!device->in_fs_metadata)
3225                         continue;
3226
3227                 if (device->total_bytes > device->bytes_used)
3228                         total_avail = device->total_bytes - device->bytes_used;
3229                 else
3230                         total_avail = 0;
3231
3232                 /* If there is no space on this device, skip it. */
3233                 if (total_avail == 0)
3234                         continue;
3235
3236                 ret = find_free_dev_extent(device,
3237                                            max_stripe_size * dev_stripes,
3238                                            &dev_offset, &max_avail);
3239                 if (ret && ret != -ENOSPC)
3240                         goto error;
3241
3242                 if (ret == 0)
3243                         max_avail = max_stripe_size * dev_stripes;
3244
3245                 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3246                         continue;
3247
3248                 devices_info[ndevs].dev_offset = dev_offset;
3249                 devices_info[ndevs].max_avail = max_avail;
3250                 devices_info[ndevs].total_avail = total_avail;
3251                 devices_info[ndevs].dev = device;
3252                 ++ndevs;
3253         }
3254
3255         /*
3256          * now sort the devices by hole size / available space
3257          */
3258         sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3259              btrfs_cmp_device_info, NULL);
3260
3261         /* round down to number of usable stripes */
3262         ndevs -= ndevs % devs_increment;
3263
3264         if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3265                 ret = -ENOSPC;
3266                 goto error;
3267         }
3268
3269         if (devs_max && ndevs > devs_max)
3270                 ndevs = devs_max;
3271         /*
3272          * the primary goal is to maximize the number of stripes, so use as many
3273          * devices as possible, even if the stripes are not maximum sized.
3274          */
3275         stripe_size = devices_info[ndevs-1].max_avail;
3276         num_stripes = ndevs * dev_stripes;
3277
3278         if (stripe_size * num_stripes > max_chunk_size * ncopies) {
3279                 stripe_size = max_chunk_size * ncopies;
3280                 do_div(stripe_size, num_stripes);
3281         }
3282
3283         do_div(stripe_size, dev_stripes);
3284         do_div(stripe_size, BTRFS_STRIPE_LEN);
3285         stripe_size *= BTRFS_STRIPE_LEN;
3286
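        /*
         * Worked example, assuming RAID1 metadata on two devices
         * (max_chunk_size = 256MB) where the smallest hole is 1GB:
         * num_stripes = 2 and 1GB * 2 > max_chunk_size * ncopies = 512MB,
         * so stripe_size is clamped to 512MB / 2 = 256MB, then rounded
         * down to a multiple of BTRFS_STRIPE_LEN.  num_bytes below comes
         * out to stripe_size * (num_stripes / ncopies) = 256MB.
         */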
3287         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3288         if (!map) {
3289                 ret = -ENOMEM;
3290                 goto error;
3291         }
3292         map->num_stripes = num_stripes;
3293
3294         for (i = 0; i < ndevs; ++i) {
3295                 for (j = 0; j < dev_stripes; ++j) {
3296                         int s = i * dev_stripes + j;
3297                         map->stripes[s].dev = devices_info[i].dev;
3298                         map->stripes[s].physical = devices_info[i].dev_offset +
3299                                                    j * stripe_size;
3300                 }
3301         }
3302         map->sector_size = extent_root->sectorsize;
3303         map->stripe_len = BTRFS_STRIPE_LEN;
3304         map->io_align = BTRFS_STRIPE_LEN;
3305         map->io_width = BTRFS_STRIPE_LEN;
3306         map->type = type;
3307         map->sub_stripes = sub_stripes;
3308
3309         *map_ret = map;
3310         num_bytes = stripe_size * (num_stripes / ncopies);
3311
3312         *stripe_size_out = stripe_size;
3313         *num_bytes_out = num_bytes;
3314
3315         trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3316
3317         em = alloc_extent_map();
3318         if (!em) {
3319                 ret = -ENOMEM;
3320                 goto error;
3321         }
3322         em->bdev = (struct block_device *)map;
3323         em->start = start;
3324         em->len = num_bytes;
3325         em->block_start = 0;
3326         em->block_len = em->len;
3327
3328         em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3329         write_lock(&em_tree->lock);
3330         ret = add_extent_mapping(em_tree, em);
3331         write_unlock(&em_tree->lock);
3332         BUG_ON(ret);
3333         free_extent_map(em);
3334
3335         ret = btrfs_make_block_group(trans, extent_root, 0, type,
3336                                      BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3337                                      start, num_bytes);
3338         BUG_ON(ret);
3339
3340         for (i = 0; i < map->num_stripes; ++i) {
3341                 struct btrfs_device *device;
3342                 u64 dev_offset;
3343
3344                 device = map->stripes[i].dev;
3345                 dev_offset = map->stripes[i].physical;
3346
3347                 ret = btrfs_alloc_dev_extent(trans, device,
3348                                 info->chunk_root->root_key.objectid,
3349                                 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3350                                 start, dev_offset, stripe_size);
3351                 BUG_ON(ret);
3352         }
3353
3354         kfree(devices_info);
3355         return 0;
3356
3357 error:
3358         kfree(map);
3359         kfree(devices_info);
3360         return ret;
3361 }
3362
3363 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3364                                 struct btrfs_root *extent_root,
3365                                 struct map_lookup *map, u64 chunk_offset,
3366                                 u64 chunk_size, u64 stripe_size)
3367 {
3368         u64 dev_offset;
3369         struct btrfs_key key;
3370         struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3371         struct btrfs_device *device;
3372         struct btrfs_chunk *chunk;
3373         struct btrfs_stripe *stripe;
3374         size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3375         int index = 0;
3376         int ret;
3377
3378         chunk = kzalloc(item_size, GFP_NOFS);
3379         if (!chunk)
3380                 return -ENOMEM;
3381
3382         index = 0;
3383         while (index < map->num_stripes) {
3384                 device = map->stripes[index].dev;
3385                 device->bytes_used += stripe_size;
3386                 ret = btrfs_update_device(trans, device);
3387                 BUG_ON(ret);
3388                 index++;
3389         }
3390
3391         spin_lock(&extent_root->fs_info->free_chunk_lock);
3392         extent_root->fs_info->free_chunk_space -= (stripe_size *
3393                                                    map->num_stripes);
3394         spin_unlock(&extent_root->fs_info->free_chunk_lock);
3395
3396         index = 0;
3397         stripe = &chunk->stripe;
3398         while (index < map->num_stripes) {
3399                 device = map->stripes[index].dev;
3400                 dev_offset = map->stripes[index].physical;
3401
3402                 btrfs_set_stack_stripe_devid(stripe, device->devid);
3403                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3404                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3405                 stripe++;
3406                 index++;
3407         }
3408
3409         btrfs_set_stack_chunk_length(chunk, chunk_size);
3410         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3411         btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3412         btrfs_set_stack_chunk_type(chunk, map->type);
3413         btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3414         btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3415         btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3416         btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3417         btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3418
3419         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3420         key.type = BTRFS_CHUNK_ITEM_KEY;
3421         key.offset = chunk_offset;
3422
3423         ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3424         BUG_ON(ret);
3425
3426         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3427                 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3428                                              item_size);
3429                 BUG_ON(ret);
3430         }
3431
3432         kfree(chunk);
3433         return 0;
3434 }
3435
3436 /*
3437  * Chunk allocation falls into two parts. The first part does the work
3438  * that makes the newly allocated chunk usable, but does not do any
3439  * operation that modifies the chunk tree. The second part does the work
3440  * that requires modifying the chunk tree. This division is important
3441  * for the bootstrap process of adding storage to a seed btrfs.
3442  */
3443 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3444                       struct btrfs_root *extent_root, u64 type)
3445 {
3446         u64 chunk_offset;
3447         u64 chunk_size;
3448         u64 stripe_size;
3449         struct map_lookup *map;
3450         struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3451         int ret;
3452
3453         ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3454                               &chunk_offset);
3455         if (ret)
3456                 return ret;
3457
3458         ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3459                                   &stripe_size, chunk_offset, type);
3460         if (ret)
3461                 return ret;
3462
3463         ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3464                                    chunk_size, stripe_size);
3465         BUG_ON(ret);
3466         return 0;
3467 }
3468
3469 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3470                                          struct btrfs_root *root,
3471                                          struct btrfs_device *device)
3472 {
3473         u64 chunk_offset;
3474         u64 sys_chunk_offset;
3475         u64 chunk_size;
3476         u64 sys_chunk_size;
3477         u64 stripe_size;
3478         u64 sys_stripe_size;
3479         u64 alloc_profile;
3480         struct map_lookup *map;
3481         struct map_lookup *sys_map;
3482         struct btrfs_fs_info *fs_info = root->fs_info;
3483         struct btrfs_root *extent_root = fs_info->extent_root;
3484         int ret;
3485
3486         ret = find_next_chunk(fs_info->chunk_root,
3487                               BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3488         if (ret)
3489                 return ret;
3490
3491         alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3492                                 fs_info->avail_metadata_alloc_bits;
3493         alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3494
3495         ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3496                                   &stripe_size, chunk_offset, alloc_profile);
3497         BUG_ON(ret);
3498
3499         sys_chunk_offset = chunk_offset + chunk_size;
3500
3501         alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3502                                 fs_info->avail_system_alloc_bits;
3503         alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3504
3505         ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3506                                   &sys_chunk_size, &sys_stripe_size,
3507                                   sys_chunk_offset, alloc_profile);
3508         BUG_ON(ret);
3509
3510         ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3511         BUG_ON(ret);
3512
3513         /*
3514          * Modifying the chunk tree requires allocating new blocks from
3515          * both the system block group and the metadata block group, so
3516          * we can only do operations that modify the chunk tree after
3517          * both block groups have been created.
3518          */
3519         ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3520                                    chunk_size, stripe_size);
3521         BUG_ON(ret);
3522
3523         ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3524                                    sys_chunk_offset, sys_chunk_size,
3525                                    sys_stripe_size);
3526         BUG_ON(ret);
3527         return 0;
3528 }
3529
3530 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3531 {
3532         struct extent_map *em;
3533         struct map_lookup *map;
3534         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3535         int readonly = 0;
3536         int i;
3537
3538         read_lock(&map_tree->map_tree.lock);
3539         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3540         read_unlock(&map_tree->map_tree.lock);
3541         if (!em)
3542                 return 1;
3543
3544         if (btrfs_test_opt(root, DEGRADED)) {
3545                 free_extent_map(em);
3546                 return 0;
3547         }
3548
3549         map = (struct map_lookup *)em->bdev;
3550         for (i = 0; i < map->num_stripes; i++) {
3551                 if (!map->stripes[i].dev->writeable) {
3552                         readonly = 1;
3553                         break;
3554                 }
3555         }
3556         free_extent_map(em);
3557         return readonly;
3558 }
3559
3560 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3561 {
3562         extent_map_tree_init(&tree->map_tree);
3563 }
3564
3565 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3566 {
3567         struct extent_map *em;
3568
3569         while (1) {
3570                 write_lock(&tree->map_tree.lock);
3571                 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3572                 if (em)
3573                         remove_extent_mapping(&tree->map_tree, em);
3574                 write_unlock(&tree->map_tree.lock);
3575                 if (!em)
3576                         break;
3577                 kfree(em->bdev);
3578                 /* once for us */
3579                 free_extent_map(em);
3580                 /* once for the tree */
3581                 free_extent_map(em);
3582         }
3583 }
3584
3585 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3586 {
3587         struct extent_map *em;
3588         struct map_lookup *map;
3589         struct extent_map_tree *em_tree = &map_tree->map_tree;
3590         int ret;
3591
3592         read_lock(&em_tree->lock);
3593         em = lookup_extent_mapping(em_tree, logical, len);
3594         read_unlock(&em_tree->lock);
3595         BUG_ON(!em);
3596
3597         BUG_ON(em->start > logical || em->start + em->len < logical);
3598         map = (struct map_lookup *)em->bdev;
3599         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3600                 ret = map->num_stripes;
3601         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3602                 ret = map->sub_stripes;
3603         else
3604                 ret = 1;
3605         free_extent_map(em);
3606         return ret;
3607 }
3608
3609 static int find_live_mirror(struct map_lookup *map, int first, int num,
3610                             int optimal)
3611 {
3612         int i;
3613         if (map->stripes[optimal].dev->bdev)
3614                 return optimal;
3615         for (i = first; i < first + num; i++) {
3616                 if (map->stripes[i].dev->bdev)
3617                         return i;
3618         }
3619         /* we couldn't find one that doesn't fail.  Just return something
3620          * and the io error handling code will clean up eventually
3621          */
3622         return optimal;
3623 }
3624
3625 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3626                              u64 logical, u64 *length,
3627                              struct btrfs_bio **bbio_ret,
3628                              int mirror_num)
3629 {
3630         struct extent_map *em;
3631         struct map_lookup *map;
3632         struct extent_map_tree *em_tree = &map_tree->map_tree;
3633         u64 offset;
3634         u64 stripe_offset;
3635         u64 stripe_end_offset;
3636         u64 stripe_nr;
3637         u64 stripe_nr_orig;
3638         u64 stripe_nr_end;
3639         int stripe_index;
3640         int i;
3641         int ret = 0;
3642         int num_stripes;
3643         int max_errors = 0;
3644         struct btrfs_bio *bbio = NULL;
3645
3646         read_lock(&em_tree->lock);
3647         em = lookup_extent_mapping(em_tree, logical, *length);
3648         read_unlock(&em_tree->lock);
3649
3650         if (!em) {
3651                 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
3652                        (unsigned long long)logical,
3653                        (unsigned long long)*length);
3654                 BUG();
3655         }
3656
3657         BUG_ON(em->start > logical || em->start + em->len < logical);
3658         map = (struct map_lookup *)em->bdev;
3659         offset = logical - em->start;
3660
3661         if (mirror_num > map->num_stripes)
3662                 mirror_num = 0;
3663
3664         stripe_nr = offset;
3665         /*
3666          * stripe_nr counts the total number of stripes we have to stride
3667          * to get to this block
3668          */
3669         do_div(stripe_nr, map->stripe_len);
3670
3671         stripe_offset = stripe_nr * map->stripe_len;
3672         BUG_ON(offset < stripe_offset);
3673
3674         /* stripe_offset is the offset of this block in its stripe */
3675         stripe_offset = offset - stripe_offset;
3676
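        /*
         * Example with the default 64K stripe_len: offset = 200K gives
         * stripe_nr = 3 and stripe_offset = 8K, i.e. the block lives 8K
         * into the fourth stripe of this chunk.
         */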
3677         if (rw & REQ_DISCARD)
3678                 *length = min_t(u64, em->len - offset, *length);
3679         else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3680                 /* we limit the length of each bio to what fits in a stripe */
3681                 *length = min_t(u64, em->len - offset,
3682                                 map->stripe_len - stripe_offset);
3683         } else {
3684                 *length = em->len - offset;
3685         }
3686
3687         if (!bbio_ret)
3688                 goto out;
3689
3690         num_stripes = 1;
3691         stripe_index = 0;
3692         stripe_nr_orig = stripe_nr;
3693         stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3694                         (~(map->stripe_len - 1));
3695         do_div(stripe_nr_end, map->stripe_len);
3696         stripe_end_offset = stripe_nr_end * map->stripe_len -
3697                             (offset + *length);
3698         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3699                 if (rw & REQ_DISCARD)
3700                         num_stripes = min_t(u64, map->num_stripes,
3701                                             stripe_nr_end - stripe_nr_orig);
3702                 stripe_index = do_div(stripe_nr, map->num_stripes);
3703         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3704                 if (rw & (REQ_WRITE | REQ_DISCARD))
3705                         num_stripes = map->num_stripes;
3706                 else if (mirror_num)
3707                         stripe_index = mirror_num - 1;
3708                 else {
3709                         stripe_index = find_live_mirror(map, 0,
3710                                             map->num_stripes,
3711                                             current->pid % map->num_stripes);
3712                         mirror_num = stripe_index + 1;
3713                 }
3714
3715         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3716                 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3717                         num_stripes = map->num_stripes;
3718                 } else if (mirror_num) {
3719                         stripe_index = mirror_num - 1;
3720                 } else {
3721                         mirror_num = 1;
3722                 }
3723
3724         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3725                 int factor = map->num_stripes / map->sub_stripes;
3726
3727                 stripe_index = do_div(stripe_nr, factor);
3728                 stripe_index *= map->sub_stripes;
3729
3730                 if (rw & REQ_WRITE)
3731                         num_stripes = map->sub_stripes;
3732                 else if (rw & REQ_DISCARD)
3733                         num_stripes = min_t(u64, map->sub_stripes *
3734                                             (stripe_nr_end - stripe_nr_orig),
3735                                             map->num_stripes);
3736                 else if (mirror_num)
3737                         stripe_index += mirror_num - 1;
3738                 else {
3739                         stripe_index = find_live_mirror(map, stripe_index,
3740                                               map->sub_stripes, stripe_index +
3741                                               current->pid % map->sub_stripes);
3742                         mirror_num = stripe_index + 1;
3743                 }
3744         } else {
3745                 /*
3746                  * after this do_div call, stripe_nr is the number of stripes
3747                  * on this device we have to walk to find the data, and
3748                  * stripe_index is the number of our device in the stripe array
3749                  */
3750                 stripe_index = do_div(stripe_nr, map->num_stripes);
3751                 mirror_num = stripe_index + 1;
3752         }
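        /*
         * RAID10 example: with num_stripes = 4 and sub_stripes = 2,
         * factor = 2.  For stripe_nr = 5 the do_div above leaves
         * stripe_nr = 2 with remainder 1, so stripe_index = 1 * 2 = 2
         * and the mirror pair is stripes[2]/stripes[3]; a write targets
         * both members, a read picks one live mirror.
         */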
3753         BUG_ON(stripe_index >= map->num_stripes);
3754
3755         bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3756         if (!bbio) {
3757                 ret = -ENOMEM;
3758                 goto out;
3759         }
3760         atomic_set(&bbio->error, 0);
3761
3762         if (rw & REQ_DISCARD) {
3763                 int factor = 0;
3764                 int sub_stripes = 0;
3765                 u64 stripes_per_dev = 0;
3766                 u32 remaining_stripes = 0;
3767
3768                 if (map->type &
3769                     (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3770                         if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3771                                 sub_stripes = 1;
3772                         else
3773                                 sub_stripes = map->sub_stripes;
3774
3775                         factor = map->num_stripes / sub_stripes;
3776                         stripes_per_dev = div_u64_rem(stripe_nr_end -
3777                                                       stripe_nr_orig,
3778                                                       factor,
3779                                                       &remaining_stripes);
3780                 }
3781
3782                 for (i = 0; i < num_stripes; i++) {
3783                         bbio->stripes[i].physical =
3784                                 map->stripes[stripe_index].physical +
3785                                 stripe_offset + stripe_nr * map->stripe_len;
3786                         bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3787
3788                         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3789                                          BTRFS_BLOCK_GROUP_RAID10)) {
3790                                 bbio->stripes[i].length = stripes_per_dev *
3791                                                           map->stripe_len;
3792                                 if (i / sub_stripes < remaining_stripes)
3793                                         bbio->stripes[i].length +=
3794                                                 map->stripe_len;
3795                                 if (i < sub_stripes)
3796                                         bbio->stripes[i].length -=
3797                                                 stripe_offset;
3798                                 if ((i / sub_stripes + 1) %
3799                                     sub_stripes == remaining_stripes)
3800                                         bbio->stripes[i].length -=
3801                                                 stripe_end_offset;
3802                                 if (i == sub_stripes - 1)
3803                                         stripe_offset = 0;
3804                         } else
3805                                 bbio->stripes[i].length = *length;
3806
3807                         stripe_index++;
3808                         if (stripe_index == map->num_stripes) {
3809                                 /* This could only happen for RAID0/10 */
3810                                 stripe_index = 0;
3811                                 stripe_nr++;
3812                         }
3813                 }
3814         } else {
3815                 for (i = 0; i < num_stripes; i++) {
3816                         bbio->stripes[i].physical =
3817                                 map->stripes[stripe_index].physical +
3818                                 stripe_offset +
3819                                 stripe_nr * map->stripe_len;
3820                         bbio->stripes[i].dev =
3821                                 map->stripes[stripe_index].dev;
3822                         stripe_index++;
3823                 }
3824         }
3825
3826         if (rw & REQ_WRITE) {
3827                 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3828                                  BTRFS_BLOCK_GROUP_RAID10 |
3829                                  BTRFS_BLOCK_GROUP_DUP)) {
3830                         max_errors = 1;
3831                 }
3832         }
3833
3834         *bbio_ret = bbio;
3835         bbio->num_stripes = num_stripes;
3836         bbio->max_errors = max_errors;
3837         bbio->mirror_num = mirror_num;
3838 out:
3839         free_extent_map(em);
3840         return ret;
3841 }
3842
3843 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3844                       u64 logical, u64 *length,
3845                       struct btrfs_bio **bbio_ret, int mirror_num)
3846 {
3847         return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
3848                                  mirror_num);
3849 }
3850
3851 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3852                      u64 chunk_start, u64 physical, u64 devid,
3853                      u64 **logical, int *naddrs, int *stripe_len)
3854 {
3855         struct extent_map_tree *em_tree = &map_tree->map_tree;
3856         struct extent_map *em;
3857         struct map_lookup *map;
3858         u64 *buf;
3859         u64 bytenr;
3860         u64 length;
3861         u64 stripe_nr;
3862         int i, j, nr = 0;
3863
3864         read_lock(&em_tree->lock);
3865         em = lookup_extent_mapping(em_tree, chunk_start, 1);
3866         read_unlock(&em_tree->lock);
3867
3868         BUG_ON(!em || em->start != chunk_start);
3869         map = (struct map_lookup *)em->bdev;
3870
3871         length = em->len;
3872         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3873                 do_div(length, map->num_stripes / map->sub_stripes);
3874         else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3875                 do_div(length, map->num_stripes);
3876
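        /*
         * length is now the number of bytes each stripe of this chunk
         * covers on a single device, e.g. 256MB for a 1GB RAID0 chunk
         * over four devices; a physical offset matches stripe i only if
         * it falls inside [stripes[i].physical,
         * stripes[i].physical + length).
         */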
3877         buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
3878         BUG_ON(!buf);
3879
3880         for (i = 0; i < map->num_stripes; i++) {
3881                 if (devid && map->stripes[i].dev->devid != devid)
3882                         continue;
3883                 if (map->stripes[i].physical > physical ||
3884                     map->stripes[i].physical + length <= physical)
3885                         continue;
3886
3887                 stripe_nr = physical - map->stripes[i].physical;
3888                 do_div(stripe_nr, map->stripe_len);
3889
3890                 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3891                         stripe_nr = stripe_nr * map->num_stripes + i;
3892                         do_div(stripe_nr, map->sub_stripes);
3893                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3894                         stripe_nr = stripe_nr * map->num_stripes + i;
3895                 }
3896                 bytenr = chunk_start + stripe_nr * map->stripe_len;
3897                 WARN_ON(nr >= map->num_stripes);
3898                 for (j = 0; j < nr; j++) {
3899                         if (buf[j] == bytenr)
3900                                 break;
3901                 }
3902                 if (j == nr) {
3903                         WARN_ON(nr >= map->num_stripes);
3904                         buf[nr++] = bytenr;
3905                 }
3906         }
3907
3908         *logical = buf;
3909         *naddrs = nr;
3910         *stripe_len = map->stripe_len;
3911
3912         free_extent_map(em);
3913         return 0;
3914 }
3915
3916 static void btrfs_end_bio(struct bio *bio, int err)
3917 {
3918         struct btrfs_bio *bbio = bio->bi_private;
3919         int is_orig_bio = 0;
3920
3921         if (err)
3922                 atomic_inc(&bbio->error);
3923
3924         if (bio == bbio->orig_bio)
3925                 is_orig_bio = 1;
3926
3927         if (atomic_dec_and_test(&bbio->stripes_pending)) {
3928                 if (!is_orig_bio) {
3929                         bio_put(bio);
3930                         bio = bbio->orig_bio;
3931                 }
3932                 bio->bi_private = bbio->private;
3933                 bio->bi_end_io = bbio->end_io;
3934                 bio->bi_bdev = (struct block_device *)
3935                                         (unsigned long)bbio->mirror_num;
3936                 /* only send an error to the higher layers if it is
3937                  * beyond the tolerance of the multi-bio
3938                  */
3939                 if (atomic_read(&bbio->error) > bbio->max_errors) {
3940                         err = -EIO;
3941                 } else {
3942                         /*
3943                          * this bio is actually up to date, we didn't
3944                          * go over the max number of errors
3945                          */
3946                         set_bit(BIO_UPTODATE, &bio->bi_flags);
3947                         err = 0;
3948                 }
3949                 kfree(bbio);
3950
3951                 bio_endio(bio, err);
3952         } else if (!is_orig_bio) {
3953                 bio_put(bio);
3954         }
3955 }
3956
3957 struct async_sched {
3958         struct bio *bio;
3959         int rw;
3960         struct btrfs_fs_info *info;
3961         struct btrfs_work work;
3962 };
3963
3964 /*
3965  * see run_scheduled_bios for a description of why bios are collected for
3966  * async submit.
3967  *
3968  * This will add one bio to the pending list for a device and make sure
3969  * the work struct is scheduled.
3970  */
3971 static noinline int schedule_bio(struct btrfs_root *root,
3972                                  struct btrfs_device *device,
3973                                  int rw, struct bio *bio)
3974 {
3975         int should_queue = 1;
3976         struct btrfs_pending_bios *pending_bios;
3977
3978         /* don't bother with additional async steps for reads, right now */
3979         if (!(rw & REQ_WRITE)) {
3980                 bio_get(bio);
3981                 btrfsic_submit_bio(rw, bio);
3982                 bio_put(bio);
3983                 return 0;
3984         }
3985
3986         /*
3987          * nr_async_bios allows us to reliably return congestion to the
3988          * higher layers.  Otherwise, the async bio makes it appear we have
3989          * made progress against dirty pages when we've really just put it
3990          * on a queue for later.
3991          */
3992         atomic_inc(&root->fs_info->nr_async_bios);
3993         WARN_ON(bio->bi_next);
3994         bio->bi_next = NULL;
3995         bio->bi_rw |= rw;
3996
3997         spin_lock(&device->io_lock);
3998         if (bio->bi_rw & REQ_SYNC)
3999                 pending_bios = &device->pending_sync_bios;
4000         else
4001                 pending_bios = &device->pending_bios;
4002
4003         if (pending_bios->tail)
4004                 pending_bios->tail->bi_next = bio;
4005
4006         pending_bios->tail = bio;
4007         if (!pending_bios->head)
4008                 pending_bios->head = bio;
4009         if (device->running_pending)
4010                 should_queue = 0;
4011
4012         spin_unlock(&device->io_lock);
4013
4014         if (should_queue)
4015                 btrfs_queue_worker(&root->fs_info->submit_workers,
4016                                    &device->work);
4017         return 0;
4018 }
4019
4020 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4021                   int mirror_num, int async_submit)
4022 {
4023         struct btrfs_mapping_tree *map_tree;
4024         struct btrfs_device *dev;
4025         struct bio *first_bio = bio;
4026         u64 logical = (u64)bio->bi_sector << 9;
4027         u64 length = 0;
4028         u64 map_length;
4029         int ret;
4030         int dev_nr = 0;
4031         int total_devs = 1;
4032         struct btrfs_bio *bbio = NULL;
4033
4034         length = bio->bi_size;
4035         map_tree = &root->fs_info->mapping_tree;
4036         map_length = length;
4037
4038         ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4039                               mirror_num);
4040         BUG_ON(ret);
4041
4042         total_devs = bbio->num_stripes;
4043         if (map_length < length) {
4044                 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
4045                        "len %llu\n", (unsigned long long)logical,
4046                        (unsigned long long)length,
4047                        (unsigned long long)map_length);
4048                 BUG();
4049         }
4050
4051         bbio->orig_bio = first_bio;
4052         bbio->private = first_bio->bi_private;
4053         bbio->end_io = first_bio->bi_end_io;
4054         atomic_set(&bbio->stripes_pending, bbio->num_stripes);
4055
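        /*
         * Fan the bio out to every stripe btrfs_map_block() returned;
         * e.g. a RAID1 write gets one copy per mirror.  All but the last
         * are clones, the last reuses first_bio, and stripes_pending
         * makes btrfs_end_bio() complete the original only after every
         * copy has finished.
         */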
4056         while (dev_nr < total_devs) {
4057                 if (dev_nr < total_devs - 1) {
4058                         bio = bio_clone(first_bio, GFP_NOFS);
4059                         BUG_ON(!bio);
4060                 } else {
4061                         bio = first_bio;
4062                 }
4063                 bio->bi_private = bbio;
4064                 bio->bi_end_io = btrfs_end_bio;
4065                 bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
4066                 dev = bbio->stripes[dev_nr].dev;
4067                 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
4068                         pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu "
4069                                  "(%s id %llu), size=%u\n", rw,
4070                                  (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4071                                  dev->name, dev->devid, bio->bi_size);
4072                         bio->bi_bdev = dev->bdev;
4073                         if (async_submit)
4074                                 schedule_bio(root, dev, rw, bio);
4075                         else
4076                                 btrfsic_submit_bio(rw, bio);
4077                 } else {
4078                         bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
4079                         bio->bi_sector = logical >> 9;
4080                         bio_endio(bio, -EIO);
4081                 }
4082                 dev_nr++;
4083         }
4084         return 0;
4085 }
4086
4087 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
4088                                        u8 *uuid, u8 *fsid)
4089 {
4090         struct btrfs_device *device;
4091         struct btrfs_fs_devices *cur_devices;
4092
4093         cur_devices = root->fs_info->fs_devices;
4094         while (cur_devices) {
4095                 if (!fsid ||
4096                     !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4097                         device = __find_device(&cur_devices->devices,
4098                                                devid, uuid);
4099                         if (device)
4100                                 return device;
4101                 }
4102                 cur_devices = cur_devices->seed;
4103         }
4104         return NULL;
4105 }
4106
4107 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4108                                             u64 devid, u8 *dev_uuid)
4109 {
4110         struct btrfs_device *device;
4111         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4112
4113         device = kzalloc(sizeof(*device), GFP_NOFS);
4114         if (!device)
4115                 return NULL;
4116         list_add(&device->dev_list,
4117                  &fs_devices->devices);
4118         device->dev_root = root->fs_info->dev_root;
4119         device->devid = devid;
4120         device->work.func = pending_bios_fn;
4121         device->fs_devices = fs_devices;
4122         device->missing = 1;
4123         fs_devices->num_devices++;
4124         fs_devices->missing_devices++;
4125         spin_lock_init(&device->io_lock);
4126         INIT_LIST_HEAD(&device->dev_alloc_list);
4127         memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4128         return device;
4129 }
4130
4131 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4132                           struct extent_buffer *leaf,
4133                           struct btrfs_chunk *chunk)
4134 {
4135         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4136         struct map_lookup *map;
4137         struct extent_map *em;
4138         u64 logical;
4139         u64 length;
4140         u64 devid;
4141         u8 uuid[BTRFS_UUID_SIZE];
4142         int num_stripes;
4143         int ret;
4144         int i;
4145
4146         logical = key->offset;
4147         length = btrfs_chunk_length(leaf, chunk);
4148
4149         read_lock(&map_tree->map_tree.lock);
4150         em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4151         read_unlock(&map_tree->map_tree.lock);
4152
4153         /* already mapped? */
4154         if (em && em->start <= logical && em->start + em->len > logical) {
4155                 free_extent_map(em);
4156                 return 0;
4157         } else if (em) {
4158                 free_extent_map(em);
4159         }
4160
4161         em = alloc_extent_map();
4162         if (!em)
4163                 return -ENOMEM;
4164         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4165         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4166         if (!map) {
4167                 free_extent_map(em);
4168                 return -ENOMEM;
4169         }
4170
4171         em->bdev = (struct block_device *)map;
4172         em->start = logical;
4173         em->len = length;
4174         em->block_start = 0;
4175         em->block_len = em->len;
4176
4177         map->num_stripes = num_stripes;
4178         map->io_width = btrfs_chunk_io_width(leaf, chunk);
4179         map->io_align = btrfs_chunk_io_align(leaf, chunk);
4180         map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4181         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4182         map->type = btrfs_chunk_type(leaf, chunk);
4183         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4184         for (i = 0; i < num_stripes; i++) {
4185                 map->stripes[i].physical =
4186                         btrfs_stripe_offset_nr(leaf, chunk, i);
4187                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4188                 read_extent_buffer(leaf, uuid, (unsigned long)
4189                                    btrfs_stripe_dev_uuid_nr(chunk, i),
4190                                    BTRFS_UUID_SIZE);
4191                 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
4192                                                         NULL);
4193                 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4194                         kfree(map);
4195                         free_extent_map(em);
4196                         return -EIO;
4197                 }
4198                 if (!map->stripes[i].dev) {
4199                         map->stripes[i].dev =
4200                                 add_missing_dev(root, devid, uuid);
4201                         if (!map->stripes[i].dev) {
4202                                 kfree(map);
4203                                 free_extent_map(em);
4204                                 return -EIO;
4205                         }
4206                 }
4207                 map->stripes[i].dev->in_fs_metadata = 1;
4208         }
4209
4210         write_lock(&map_tree->map_tree.lock);
4211         ret = add_extent_mapping(&map_tree->map_tree, em);
4212         write_unlock(&map_tree->map_tree.lock);
4213         BUG_ON(ret);
4214         free_extent_map(em);
4215
4216         return 0;
4217 }
4218
4219 static int fill_device_from_item(struct extent_buffer *leaf,
4220                                  struct btrfs_dev_item *dev_item,
4221                                  struct btrfs_device *device)
4222 {
4223         unsigned long ptr;
4224
4225         device->devid = btrfs_device_id(leaf, dev_item);
4226         device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4227         device->total_bytes = device->disk_total_bytes;
4228         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4229         device->type = btrfs_device_type(leaf, dev_item);
4230         device->io_align = btrfs_device_io_align(leaf, dev_item);
4231         device->io_width = btrfs_device_io_width(leaf, dev_item);
4232         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4233
4234         ptr = (unsigned long)btrfs_device_uuid(dev_item);
4235         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4236
4237         return 0;
4238 }
4239
4240 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4241 {
4242         struct btrfs_fs_devices *fs_devices;
4243         int ret;
4244
4245         BUG_ON(!mutex_is_locked(&uuid_mutex));
4246
4247         fs_devices = root->fs_info->fs_devices->seed;
4248         while (fs_devices) {
4249                 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4250                         ret = 0;
4251                         goto out;
4252                 }
4253                 fs_devices = fs_devices->seed;
4254         }
4255
4256         fs_devices = find_fsid(fsid);
4257         if (!fs_devices) {
4258                 ret = -ENOENT;
4259                 goto out;
4260         }
4261
4262         fs_devices = clone_fs_devices(fs_devices);
4263         if (IS_ERR(fs_devices)) {
4264                 ret = PTR_ERR(fs_devices);
4265                 goto out;
4266         }
4267
4268         ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4269                                    root->fs_info->bdev_holder);
4270         if (ret)
4271                 goto out;
4272
4273         if (!fs_devices->seeding) {
4274                 __btrfs_close_devices(fs_devices);
4275                 free_fs_devices(fs_devices);
4276                 ret = -EINVAL;
4277                 goto out;
4278         }
4279
4280         fs_devices->seed = root->fs_info->fs_devices->seed;
4281         root->fs_info->fs_devices->seed = fs_devices;
4282 out:
4283         return ret;
4284 }
4285
4286 static int read_one_dev(struct btrfs_root *root,
4287                         struct extent_buffer *leaf,
4288                         struct btrfs_dev_item *dev_item)
4289 {
4290         struct btrfs_device *device;
4291         u64 devid;
4292         int ret;
4293         u8 fs_uuid[BTRFS_UUID_SIZE];
4294         u8 dev_uuid[BTRFS_UUID_SIZE];
4295
4296         devid = btrfs_device_id(leaf, dev_item);
4297         read_extent_buffer(leaf, dev_uuid,
4298                            (unsigned long)btrfs_device_uuid(dev_item),
4299                            BTRFS_UUID_SIZE);
4300         read_extent_buffer(leaf, fs_uuid,
4301                            (unsigned long)btrfs_device_fsid(dev_item),
4302                            BTRFS_UUID_SIZE);
4303
4304         if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4305                 ret = open_seed_devices(root, fs_uuid);
4306                 if (ret && !btrfs_test_opt(root, DEGRADED))
4307                         return ret;
4308         }
4309
4310         device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
4311         if (!device || !device->bdev) {
4312                 if (!btrfs_test_opt(root, DEGRADED))
4313                         return -EIO;
4314
4315                 if (!device) {
4316                         printk(KERN_WARNING "warning: devid %llu missing\n",
4317                                (unsigned long long)devid);
4318                         device = add_missing_dev(root, devid, dev_uuid);
4319                         if (!device)
4320                                 return -ENOMEM;
4321                 } else if (!device->missing) {
4322                         /*
4323                          * this happens when a device that was properly set up
4324                          * in the device info lists suddenly goes bad.
4325                          * device->bdev is NULL, and so we have to set
4326                          * device->missing to one here
4327                          */
4328                         root->fs_info->fs_devices->missing_devices++;
4329                         device->missing = 1;
4330                 }
4331         }
4332
4333         if (device->fs_devices != root->fs_info->fs_devices) {
4334                 BUG_ON(device->writeable);
4335                 if (device->generation !=
4336                     btrfs_device_generation(leaf, dev_item))
4337                         return -EINVAL;
4338         }
4339
4340         fill_device_from_item(leaf, dev_item, device);
4341         device->dev_root = root->fs_info->dev_root;
4342         device->in_fs_metadata = 1;
4343         if (device->writeable) {
4344                 device->fs_devices->total_rw_bytes += device->total_bytes;
4345                 spin_lock(&root->fs_info->free_chunk_lock);
4346                 root->fs_info->free_chunk_space += device->total_bytes -
4347                         device->bytes_used;
4348                 spin_unlock(&root->fs_info->free_chunk_lock);
4349         }
4350         ret = 0;
4351         return ret;
4352 }
4353
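/*
 * Read the chunks from the superblock's sys_chunk_array.  These system
 * chunks map the chunk tree itself, so they have to be set up before
 * btrfs_read_chunk_tree() can do anything.
 */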
4354 int btrfs_read_sys_array(struct btrfs_root *root)
4355 {
4356         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4357         struct extent_buffer *sb;
4358         struct btrfs_disk_key *disk_key;
4359         struct btrfs_chunk *chunk;
4360         u8 *ptr;
4361         unsigned long sb_ptr;
4362         int ret = 0;
4363         u32 num_stripes;
4364         u32 array_size;
4365         u32 len = 0;
4366         u32 cur;
4367         struct btrfs_key key;
4368
4369         sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4370                                           BTRFS_SUPER_INFO_SIZE);
4371         if (!sb)
4372                 return -ENOMEM;
4373         btrfs_set_buffer_uptodate(sb);
4374         btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4375         /*
4376          * The sb extent buffer is artificial and only used to read the system
4377          * array.  The btrfs_set_buffer_uptodate() call does not properly mark
4378          * all of its pages up-to-date when the page is larger: the extent does
4379          * not cover the whole page, so check_page_uptodate does not find all
4380          * of the page's extents up-to-date (the hole beyond sb), and
4381          * write_extent_buffer then triggers a WARN_ON.
4382          *
4383          * Regular short extents go through the mark_extent_buffer_dirty/
4384          * writeback cycle, but sb only spans this function.  Add an explicit
4385          * SetPageUptodate call to silence the warning, e.g. on PowerPC 64.
4386          */
4387         if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4388                 SetPageUptodate(sb->first_page);
4389
4390         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4391         array_size = btrfs_super_sys_array_size(super_copy);
4392
4393         ptr = super_copy->sys_chunk_array;
4394         sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
4395         cur = 0;
4396
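        /*
         * The array is a packed sequence of (disk key, chunk item) pairs,
         * e.g.:
         *
         *      [btrfs_disk_key][btrfs_chunk + N stripes][btrfs_disk_key]...
         *
         * Chunk items are variable sized, so after each one we advance by
         * btrfs_chunk_item_size(num_stripes).
         */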
4397         while (cur < array_size) {
4398                 disk_key = (struct btrfs_disk_key *)ptr;
4399                 btrfs_disk_key_to_cpu(&key, disk_key);
4400
4401                 len = sizeof(*disk_key);
                ptr += len;
4402                 sb_ptr += len;
4403                 cur += len;
4404
4405                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4406                         chunk = (struct btrfs_chunk *)sb_ptr;
4407                         ret = read_one_chunk(root, &key, sb, chunk);
4408                         if (ret)
4409                                 break;
4410                         num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4411                         len = btrfs_chunk_item_size(num_stripes);
4412                 } else {
4413                         ret = -EIO;
4414                         break;
4415                 }
4416                 ptr += len;
4417                 sb_ptr += len;
4418                 cur += len;
4419         }
4420         free_extent_buffer(sb);
4421         return ret;
4422 }
4423
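/*
 * Build the device list and chunk mappings from the chunk tree.  The tree
 * is scanned twice: once for dev items and once for chunk items, see the
 * comment below.
 */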
4424 int btrfs_read_chunk_tree(struct btrfs_root *root)
4425 {
4426         struct btrfs_path *path;
4427         struct extent_buffer *leaf;
4428         struct btrfs_key key;
4429         struct btrfs_key found_key;
4430         int ret;
4431         int slot;
4432
4433         root = root->fs_info->chunk_root;
4434
4435         path = btrfs_alloc_path();
4436         if (!path)
4437                 return -ENOMEM;
4438
4439         mutex_lock(&uuid_mutex);
4440         lock_chunks(root);
4441
4442         /* first we search for all of the device items, and then we
4443          * read in all of the chunk items.  This way we can create chunk
4444          * mappings that reference all of the devices that are found.
4445          */
4446         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4447         key.offset = 0;
4448         key.type = 0;
4449 again:
4450         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4451         if (ret < 0)
4452                 goto error;
4453         while (1) {
4454                 leaf = path->nodes[0];
4455                 slot = path->slots[0];
4456                 if (slot >= btrfs_header_nritems(leaf)) {
4457                         ret = btrfs_next_leaf(root, path);
4458                         if (ret == 0)
4459                                 continue;
4460                         if (ret < 0)
4461                                 goto error;
4462                         break;
4463                 }
4464                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4465                 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4466                         if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4467                                 break;
4468                         if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4469                                 struct btrfs_dev_item *dev_item;
4470                                 dev_item = btrfs_item_ptr(leaf, slot,
4471                                                   struct btrfs_dev_item);
4472                                 ret = read_one_dev(root, leaf, dev_item);
4473                                 if (ret)
4474                                         goto error;
4475                         }
4476                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4477                         struct btrfs_chunk *chunk;
4478                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4479                         ret = read_one_chunk(root, &found_key, leaf, chunk);
4480                         if (ret)
4481                                 goto error;
4482                 }
4483                 path->slots[0]++;
4484         }
4485         if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4486                 key.objectid = 0;
4487                 btrfs_release_path(path);
4488                 goto again;
4489         }
4490         ret = 0;
4491 error:
4492         unlock_chunks(root);
4493         mutex_unlock(&uuid_mutex);
4494
4495         btrfs_free_path(path);
4496         return ret;
4497 }
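
/*
 * Rough mount-time call sequence for the readers above (a sketch; the
 * exact call sites live in open_ctree() in disk-io.c):
 *
 *      ret = btrfs_read_sys_array(tree_root);   /* bootstrap system chunks */
 *      ...
 *      ret = btrfs_read_chunk_tree(chunk_root); /* then the full chunk tree */
 */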