/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/vmalloc.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024
struct btrfs_raid_bio {
        struct btrfs_fs_info *fs_info;
        struct btrfs_bio *bbio;

        /*
         * logical block numbers for the start of each stripe
         * The last one or two are p/q.  These are sorted,
         * so raid_map[0] is the start of our full stripe
         */
        u64 *raid_map;

        /* while we're doing rmw on a stripe
         * we put it into a hash table so we can
         * lock the stripe and merge more rbios
         * into it.
         */
        struct list_head hash_list;

        /*
         * LRU list for the stripe cache
         */
        struct list_head stripe_cache;

        /*
         * for scheduling work in the helper threads
         */
        struct btrfs_work work;

        /*
         * bio list and bio_list_lock are used
         * to add more bios into the stripe
         * in hopes of avoiding the full rmw
         */
        struct bio_list bio_list;
        spinlock_t bio_list_lock;

        /* also protected by the bio_list_lock, the
         * plug list is used by the plugging code
         * to collect partial bios while plugged.  The
         * stripe locking code also uses it to hand off
         * the stripe lock to the next pending IO
         */
        struct list_head plug_list;

        /*
         * flags that tell us if it is safe to
         * merge with this bio
         */
        unsigned long flags;

        /* size of each individual stripe on disk */
        int stripe_len;

        /* number of data stripes (no p/q) */
        int nr_data;

        /*
         * set if we're doing a parity rebuild
         * for a read from higher up, which is handled
         * differently from a parity rebuild as part of
         * rmw
         */
        int read_rebuild;

        /* first bad stripe */
        int faila;

        /* second bad stripe (for raid6 use) */
        int failb;

        /*
         * number of pages needed to represent the full
         * stripe
         */
        int nr_pages;

        /*
         * size of all the bios in the bio_list.  This
         * helps us decide if the rbio maps to a full
         * stripe or not
         */
        int bio_list_bytes;

        atomic_t refs;

        /*
         * these are two arrays of pointers.  We allocate the
         * rbio big enough to hold them both and setup their
         * locations when the rbio is allocated
         */

        /* pointers to pages that we allocated for
         * reading/writing stripes directly from the disk (including P/Q)
         */
        struct page **stripe_pages;

        /*
         * pointers to the pages in the bio_list.  Stored
         * here for faster lookup
         */
        struct page **bio_pages;
};
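/*
 * Illustrative layout note, not driver logic: the rbio and both
 * pointer arrays come from one allocation in alloc_rbio().  With
 * num_pages == N the memory looks like
 *
 *	[struct btrfs_raid_bio][N x struct page * stripe_pages][N x struct page * bio_pages]
 *
 * e.g. assuming a 4 drive array with a 64K stripe_len and 4K pages,
 * N = (4 * 64K) / 4K = 64, so the allocation is
 * sizeof(*rbio) + 2 * 64 * sizeof(struct page *) bytes.
 */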
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
        struct btrfs_stripe_hash_table *table;
        struct btrfs_stripe_hash_table *x;
        struct btrfs_stripe_hash *cur;
        struct btrfs_stripe_hash *h;
        int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
        int table_size;
        int i;

        if (info->stripe_hash_table)
                return 0;

        /*
         * The table is large, starting with order 4 and can go as high as
         * order 7 in case lock debugging is turned on.
         *
         * Try harder to allocate and fall back to vmalloc to lower the chance
         * of a failing mount.
         */
        table_size = sizeof(*table) + sizeof(*h) * num_entries;
        table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
        if (!table) {
                table = vzalloc(table_size);
                if (!table)
                        return -ENOMEM;
        }

        spin_lock_init(&table->cache_lock);
        INIT_LIST_HEAD(&table->stripe_cache);

        h = table->table;

        for (i = 0; i < num_entries; i++) {
                cur = h + i;
                INIT_LIST_HEAD(&cur->hash_list);
                spin_lock_init(&cur->lock);
                init_waitqueue_head(&cur->wait);
        }

        x = cmpxchg(&info->stripe_hash_table, NULL, table);
        if (x) {
                if (is_vmalloc_addr(x))
                        vfree(x);
                else
                        kfree(x);
        }
        return 0;
}
/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
        int i;
        char *s;
        char *d;
        int ret;

        ret = alloc_rbio_pages(rbio);
        if (ret)
                return;

        for (i = 0; i < rbio->nr_pages; i++) {
                if (!rbio->bio_pages[i])
                        continue;

                s = kmap(rbio->bio_pages[i]);
                d = kmap(rbio->stripe_pages[i]);

                memcpy(d, s, PAGE_CACHE_SIZE);

                kunmap(rbio->bio_pages[i]);
                kunmap(rbio->stripe_pages[i]);
                SetPageUptodate(rbio->stripe_pages[i]);
        }
        set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}
/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
        u64 num = rbio->raid_map[0];

        /*
         * we shift down quite a bit.  We're using byte
         * addressing, and most of the lower bits are zeros.
         * This tends to upset hash_64, and it consistently
         * returns just one or two different values.
         *
         * shifting off the lower bits fixes things.
         */
        return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
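/*
 * Worked example, assuming BTRFS_STRIPE_HASH_TABLE_BITS == 11 (its
 * value in raid56.h at the time of writing): for a full stripe at
 * logical byte 0x12340000,
 *
 *	rbio_bucket() == hash_64(0x12340000 >> 16, 11) == hash_64(0x1234, 11)
 *
 * Any two rbios for the same full stripe share raid_map[0], so they
 * always hash to the same bucket, which is what the stripe locking
 * below depends on.
 */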
/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
        int i;
        struct page *s;
        struct page *d;

        if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
                return;

        for (i = 0; i < dest->nr_pages; i++) {
                s = src->stripe_pages[i];
                if (!s || !PageUptodate(s))
                        continue;

                d = dest->stripe_pages[i];
                if (d)
                        __free_page(d);

                dest->stripe_pages[i] = s;
                src->stripe_pages[i] = NULL;
        }
}
/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
                       struct btrfs_raid_bio *victim)
{
        bio_list_merge(&dest->bio_list, &victim->bio_list);
        dest->bio_list_bytes += victim->bio_list_bytes;
        bio_list_init(&victim->bio_list);
}
/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
        int bucket = rbio_bucket(rbio);
        struct btrfs_stripe_hash_table *table;
        struct btrfs_stripe_hash *h;
        int freeit = 0;

        /*
         * check the bit again under the hash table lock.
         */
        if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
                return;

        table = rbio->fs_info->stripe_hash_table;
        h = table->table + bucket;

        /* hold the lock for the bucket because we may be
         * removing it from the hash table
         */
        spin_lock(&h->lock);

        /*
         * hold the lock for the bio list because we need
         * to make sure the bio list is empty
         */
        spin_lock(&rbio->bio_list_lock);

        if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
                list_del_init(&rbio->stripe_cache);
                table->cache_size -= 1;
                freeit = 1;

                /* if the bio list isn't empty, this rbio is
                 * still involved in an IO.  We take it out
                 * of the cache list, and drop the ref that
                 * was held for the list.
                 *
                 * If the bio_list was empty, we also remove
                 * the rbio from the hash_table, and drop
                 * the corresponding ref
                 */
                if (bio_list_empty(&rbio->bio_list)) {
                        if (!list_empty(&rbio->hash_list)) {
                                list_del_init(&rbio->hash_list);
                                atomic_dec(&rbio->refs);
                                BUG_ON(!list_empty(&rbio->plug_list));
                        }
                }
        }

        spin_unlock(&rbio->bio_list_lock);
        spin_unlock(&h->lock);

        if (freeit)
                __free_raid_bio(rbio);
}
/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
        struct btrfs_stripe_hash_table *table;
        unsigned long flags;

        if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
                return;

        table = rbio->fs_info->stripe_hash_table;

        spin_lock_irqsave(&table->cache_lock, flags);
        __remove_rbio_from_cache(rbio);
        spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove everything in the cache
 */
void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
        struct btrfs_stripe_hash_table *table;
        unsigned long flags;
        struct btrfs_raid_bio *rbio;

        table = info->stripe_hash_table;

        spin_lock_irqsave(&table->cache_lock, flags);
        while (!list_empty(&table->stripe_cache)) {
                rbio = list_entry(table->stripe_cache.next,
                                  struct btrfs_raid_bio,
                                  stripe_cache);
                __remove_rbio_from_cache(rbio);
        }
        spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
        if (!info->stripe_hash_table)
                return;
        btrfs_clear_rbio_cache(info);
        if (is_vmalloc_addr(info->stripe_hash_table))
                vfree(info->stripe_hash_table);
        else
                kfree(info->stripe_hash_table);
        info->stripe_hash_table = NULL;
}
/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
        struct btrfs_stripe_hash_table *table;
        unsigned long flags;

        if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
                return;

        table = rbio->fs_info->stripe_hash_table;

        spin_lock_irqsave(&table->cache_lock, flags);
        spin_lock(&rbio->bio_list_lock);

        /* bump our ref if we were not in the list before */
        if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
                atomic_inc(&rbio->refs);

        if (!list_empty(&rbio->stripe_cache)) {
                list_move(&rbio->stripe_cache, &table->stripe_cache);
        } else {
                list_add(&rbio->stripe_cache, &table->stripe_cache);
                table->cache_size += 1;
        }

        spin_unlock(&rbio->bio_list_lock);

        if (table->cache_size > RBIO_CACHE_SIZE) {
                struct btrfs_raid_bio *found;

                found = list_entry(table->stripe_cache.prev,
                                   struct btrfs_raid_bio,
                                   stripe_cache);

                if (found != rbio)
                        __remove_rbio_from_cache(found);
        }

        spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
        int src_off = 0;
        int xor_src_cnt = 0;
        void *dest = pages[src_cnt];

        while (src_cnt > 0) {
                xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
                xor_blocks(xor_src_cnt, len, dest, pages + src_off);

                src_cnt -= xor_src_cnt;
                src_off += xor_src_cnt;
        }
}
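/*
 * Illustrative walk-through, assuming MAX_XOR_BLOCKS == 4 (from
 * linux/raid/xor.h): a call with src_cnt == 10 becomes
 *
 *	xor_blocks(4, len, dest, pages + 0);
 *	xor_blocks(4, len, dest, pages + 4);
 *	xor_blocks(2, len, dest, pages + 8);
 *
 * folding all ten sources into dest over three passes.
 */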
/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a time when you know it is impossible to add
 * new bios into the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
        unsigned long size = rbio->bio_list_bytes;
        int ret = 1;

        if (size != rbio->nr_data * rbio->stripe_len)
                ret = 0;

        BUG_ON(size > rbio->nr_data * rbio->stripe_len);
        return ret;
}

static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&rbio->bio_list_lock, flags);
        ret = __rbio_is_full(rbio);
        spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
        return ret;
}
/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
                          struct btrfs_raid_bio *cur)
{
        if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
            test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
                return 0;

        /*
         * we can't merge with cached rbios, since the
         * idea is that when we merge the destination
         * rbio is going to run our IO for us.  We can
         * steal from cached rbio's though, other functions
         * handle that.
         */
        if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
            test_bit(RBIO_CACHE_BIT, &cur->flags))
                return 0;

        if (last->raid_map[0] !=
            cur->raid_map[0])
                return 0;

        /* reads can't merge with writes */
        if (last->read_rebuild !=
            cur->read_rebuild) {
                return 0;
        }

        return 1;
}
/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
        index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
        return rbio->stripe_pages[index];
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
        if (rbio->nr_data + 1 == rbio->bbio->num_stripes)
                return NULL;

        index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
                PAGE_CACHE_SHIFT;
        return rbio->stripe_pages[index];
}
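/*
 * Index arithmetic example, assuming a 4 drive raid6 array (nr_data
 * == 2), 64K stripe_len and 4K pages: each stripe spans 16 pages, so
 * data lives in stripe_pages[0..31], P starts at 2 * 16 == 32 and Q
 * at 3 * 16 == 48.  rbio_pstripe_page(rbio, 5) therefore returns
 * stripe_pages[37] and rbio_qstripe_page(rbio, 5) stripe_pages[53].
 */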
/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
        int bucket = rbio_bucket(rbio);
        struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
        struct btrfs_raid_bio *cur;
        struct btrfs_raid_bio *pending;
        unsigned long flags;
        struct btrfs_raid_bio *freeit = NULL;
        struct btrfs_raid_bio *cache_drop = NULL;
        int ret = 0;

        spin_lock_irqsave(&h->lock, flags);
        list_for_each_entry(cur, &h->hash_list, hash_list) {
                if (cur->raid_map[0] == rbio->raid_map[0]) {
                        spin_lock(&cur->bio_list_lock);

                        /* can we steal this cached rbio's pages? */
                        if (bio_list_empty(&cur->bio_list) &&
                            list_empty(&cur->plug_list) &&
                            test_bit(RBIO_CACHE_BIT, &cur->flags) &&
                            !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
                                list_del_init(&cur->hash_list);
                                atomic_dec(&cur->refs);

                                steal_rbio(cur, rbio);
                                cache_drop = cur;
                                spin_unlock(&cur->bio_list_lock);

                                goto lockit;
                        }

                        /* can we merge into the lock owner? */
                        if (rbio_can_merge(cur, rbio)) {
                                merge_rbio(cur, rbio);
                                spin_unlock(&cur->bio_list_lock);
                                freeit = rbio;
                                ret = 1;
                                goto out;
                        }

                        /*
                         * we couldn't merge with the running
                         * rbio, see if we can merge with the
                         * pending ones.  We don't have to
                         * check for rmw_locked because there
                         * is no way they are inside finish_rmw
                         * right now
                         */
                        list_for_each_entry(pending, &cur->plug_list,
                                            plug_list) {
                                if (rbio_can_merge(pending, rbio)) {
                                        merge_rbio(pending, rbio);
                                        spin_unlock(&cur->bio_list_lock);
                                        freeit = rbio;
                                        ret = 1;
                                        goto out;
                                }
                        }

                        /* no merging, put us on the tail of the plug list,
                         * our rbio will be started when the currently
                         * running rbio unlocks
                         */
                        list_add_tail(&rbio->plug_list, &cur->plug_list);
                        spin_unlock(&cur->bio_list_lock);
                        ret = 1;
                        goto out;
                }
        }
lockit:
        atomic_inc(&rbio->refs);
        list_add(&rbio->hash_list, &h->hash_list);
out:
        spin_unlock_irqrestore(&h->lock, flags);
        if (cache_drop)
                remove_rbio_from_cache(cache_drop);
        if (freeit)
                __free_raid_bio(freeit);
        return ret;
}
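/*
 * Sketch of the caller pattern implied by the return codes above
 * (full_stripe_write() below is a real instance):
 *
 *	ret = lock_stripe_add(rbio);
 *	if (ret == 0)
 *		finish_rmw(rbio);	// we own the lock, start the IO
 *
 * When 1 is returned the rbio was merged or parked on a plug_list
 * and is no longer ours to touch.
 */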
/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
        int bucket;
        struct btrfs_stripe_hash *h;
        unsigned long flags;
        int keep_cache = 0;

        bucket = rbio_bucket(rbio);
        h = rbio->fs_info->stripe_hash_table->table + bucket;

        if (list_empty(&rbio->plug_list))
                cache_rbio(rbio);

        spin_lock_irqsave(&h->lock, flags);
        spin_lock(&rbio->bio_list_lock);

        if (!list_empty(&rbio->hash_list)) {
                /*
                 * if we're still cached and there is no other IO
                 * to perform, just leave this rbio here for others
                 * to steal from later
                 */
                if (list_empty(&rbio->plug_list) &&
                    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
                        keep_cache = 1;
                        clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
                        BUG_ON(!bio_list_empty(&rbio->bio_list));
                        goto done;
                }

                list_del_init(&rbio->hash_list);
                atomic_dec(&rbio->refs);

                /*
                 * we use the plug list to hold all the rbios
                 * waiting for the chance to lock this stripe.
                 * hand the lock over to one of them.
                 */
                if (!list_empty(&rbio->plug_list)) {
                        struct btrfs_raid_bio *next;
                        struct list_head *head = rbio->plug_list.next;

                        next = list_entry(head, struct btrfs_raid_bio,
                                          plug_list);

                        list_del_init(&rbio->plug_list);

                        list_add(&next->hash_list, &h->hash_list);
                        atomic_inc(&next->refs);
                        spin_unlock(&rbio->bio_list_lock);
                        spin_unlock_irqrestore(&h->lock, flags);

                        if (next->read_rebuild)
                                async_read_rebuild(next);
                        else {
                                steal_rbio(rbio, next);
                                async_rmw_stripe(next);
                        }

                        goto done_nolock;
                } else if (waitqueue_active(&h->wait)) {
                        spin_unlock(&rbio->bio_list_lock);
                        spin_unlock_irqrestore(&h->lock, flags);
                        wake_up(&h->wait);
                        goto done_nolock;
                }
        }
done:
        spin_unlock(&rbio->bio_list_lock);
        spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
        if (!keep_cache)
                remove_rbio_from_cache(rbio);
}
static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
        int i;

        WARN_ON(atomic_read(&rbio->refs) < 0);
        if (!atomic_dec_and_test(&rbio->refs))
                return;

        WARN_ON(!list_empty(&rbio->stripe_cache));
        WARN_ON(!list_empty(&rbio->hash_list));
        WARN_ON(!bio_list_empty(&rbio->bio_list));

        for (i = 0; i < rbio->nr_pages; i++) {
                if (rbio->stripe_pages[i]) {
                        __free_page(rbio->stripe_pages[i]);
                        rbio->stripe_pages[i] = NULL;
                }
        }
        kfree(rbio->raid_map);
        kfree(rbio->bbio);
        kfree(rbio);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
        unlock_stripe(rbio);
        __free_raid_bio(rbio);
}
/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate)
{
        struct bio *cur = bio_list_get(&rbio->bio_list);
        struct bio *next;
        free_raid_bio(rbio);

        while (cur) {
                next = cur->bi_next;
                cur->bi_next = NULL;
                if (uptodate)
                        set_bit(BIO_UPTODATE, &cur->bi_flags);
                bio_endio(cur, err);
                cur = next;
        }
}
/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio, int err)
{
        struct btrfs_raid_bio *rbio = bio->bi_private;

        if (err)
                fail_bio_stripe(rbio, bio);

        bio_put(bio);

        if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
                return;

        err = 0;

        /* OK, we have written all the stripes we need to. */
        if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
                err = -EIO;

        rbio_orig_end_io(rbio, err, 0);
}
/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
                                 int index, int pagenr, int bio_list_only)
{
        int chunk_page;
        struct page *p = NULL;

        chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

        spin_lock_irq(&rbio->bio_list_lock);
        p = rbio->bio_pages[chunk_page];
        spin_unlock_irq(&rbio->bio_list_lock);

        if (p || bio_list_only)
                return p;

        return rbio->stripe_pages[chunk_page];
}
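/*
 * Example of the chunk_page math, assuming 64K stripe_len and 4K
 * pages: stripe_len >> PAGE_SHIFT == 16, so page_in_rbio(rbio, 2, 3, 0)
 * inspects index 2 * 16 + 3 == 35, first in bio_pages[] and then,
 * when the higher layers didn't send that page down, in
 * stripe_pages[].
 */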
/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
        unsigned long nr = stripe_len * nr_stripes;
        return (nr + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}
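/*
 * e.g. a 5 drive raid5 array with 64K stripe_len and 4K pages needs
 * rbio_nr_pages(65536, 5) == (327680 + 4095) >> 12 == 80 pages.
 */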
/*
 * allocation and initial setup for the btrfs_raid_bio.  Note
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
                          struct btrfs_bio *bbio, u64 *raid_map,
                          u64 stripe_len)
{
        struct btrfs_raid_bio *rbio;
        int nr_data = 0;
        int num_pages = rbio_nr_pages(stripe_len, bbio->num_stripes);
        void *p;

        rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2,
                        GFP_NOFS);
        if (!rbio) {
                kfree(raid_map);
                kfree(bbio);
                return ERR_PTR(-ENOMEM);
        }

        bio_list_init(&rbio->bio_list);
        INIT_LIST_HEAD(&rbio->plug_list);
        spin_lock_init(&rbio->bio_list_lock);
        INIT_LIST_HEAD(&rbio->stripe_cache);
        INIT_LIST_HEAD(&rbio->hash_list);
        rbio->bbio = bbio;
        rbio->raid_map = raid_map;
        rbio->fs_info = root->fs_info;
        rbio->stripe_len = stripe_len;
        rbio->nr_pages = num_pages;
        rbio->faila = -1;
        rbio->failb = -1;
        atomic_set(&rbio->refs, 1);

        /*
         * the stripe_pages and bio_pages array point to the extra
         * memory we allocated past the end of the rbio
         */
        p = rbio + 1;
        rbio->stripe_pages = p;
        rbio->bio_pages = p + sizeof(struct page *) * num_pages;

        if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
                nr_data = bbio->num_stripes - 2;
        else
                nr_data = bbio->num_stripes - 1;

        rbio->nr_data = nr_data;
        return rbio;
}
/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
        int i;
        struct page *page;

        for (i = 0; i < rbio->nr_pages; i++) {
                if (rbio->stripe_pages[i])
                        continue;
                page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
                if (!page)
                        return -ENOMEM;
                rbio->stripe_pages[i] = page;
                ClearPageUptodate(page);
        }
        return 0;
}
/* allocate pages for just the p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
        int i;
        struct page *page;

        i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;

        for (; i < rbio->nr_pages; i++) {
                if (rbio->stripe_pages[i])
                        continue;
                page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
                if (!page)
                        return -ENOMEM;
                rbio->stripe_pages[i] = page;
        }
        return 0;
}
/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
                            struct bio_list *bio_list,
                            struct page *page,
                            int stripe_nr,
                            unsigned long page_index,
                            unsigned long bio_max_len)
{
        struct bio *last = bio_list->tail;
        u64 last_end = 0;
        int ret;
        struct bio *bio;
        struct btrfs_bio_stripe *stripe;
        u64 disk_start;

        stripe = &rbio->bbio->stripes[stripe_nr];
        disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);

        /* if the device is missing, just fail this stripe */
        if (!stripe->dev->bdev)
                return fail_rbio_index(rbio, stripe_nr);

        /* see if we can add this page onto our existing bio */
        if (last) {
                last_end = (u64)last->bi_sector << 9;
                last_end += last->bi_size;

                /*
                 * we can't merge these if they are from different
                 * devices or if they are not contiguous
                 */
                if (last_end == disk_start && stripe->dev->bdev &&
                    test_bit(BIO_UPTODATE, &last->bi_flags) &&
                    last->bi_bdev == stripe->dev->bdev) {
                        ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
                        if (ret == PAGE_CACHE_SIZE)
                                return 0;
                }
        }

        /* put a new bio on the list */
        bio = bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
        if (!bio)
                return -ENOMEM;

        bio->bi_size = 0;
        bio->bi_bdev = stripe->dev->bdev;
        bio->bi_sector = disk_start >> 9;
        set_bit(BIO_UPTODATE, &bio->bi_flags);

        bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
        bio_list_add(bio_list, bio);
        return 0;
}
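/*
 * Example of the merge test above (values illustrative): if the tail
 * bio ends at byte 0x10000 on the same bdev the new page maps to and
 * disk_start == 0x10000, bio_add_page() simply extends that bio; a
 * page at 0x20000, or one on a different device, gets a fresh bio.
 */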
/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
        if (rbio->faila >= 0 || rbio->failb >= 0) {
                BUG_ON(rbio->faila == rbio->bbio->num_stripes - 1);
                __raid56_parity_recover(rbio);
        } else {
                finish_rmw(rbio);
        }
}
/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page)
{
        int index;
        index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT);
        index += page;
        return rbio->stripe_pages[index];
}
/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
        struct bio *bio;
        u64 start;
        unsigned long stripe_offset;
        unsigned long page_index;
        struct page *p;
        int i;

        spin_lock_irq(&rbio->bio_list_lock);
        bio_list_for_each(bio, &rbio->bio_list) {
                start = (u64)bio->bi_sector << 9;
                stripe_offset = start - rbio->raid_map[0];
                page_index = stripe_offset >> PAGE_CACHE_SHIFT;

                for (i = 0; i < bio->bi_vcnt; i++) {
                        p = bio->bi_io_vec[i].bv_page;
                        rbio->bio_pages[page_index + i] = p;
                }
        }
        spin_unlock_irq(&rbio->bio_list_lock);
}
/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
        struct btrfs_bio *bbio = rbio->bbio;
        void *pointers[bbio->num_stripes];
        int stripe_len = rbio->stripe_len;
        int nr_data = rbio->nr_data;
        int stripe;
        int pagenr;
        int p_stripe = -1;
        int q_stripe = -1;
        struct bio_list bio_list;
        struct bio *bio;
        int pages_per_stripe = stripe_len >> PAGE_CACHE_SHIFT;
        int ret;

        bio_list_init(&bio_list);

        if (bbio->num_stripes - rbio->nr_data == 1) {
                p_stripe = bbio->num_stripes - 1;
        } else if (bbio->num_stripes - rbio->nr_data == 2) {
                p_stripe = bbio->num_stripes - 2;
                q_stripe = bbio->num_stripes - 1;
        } else {
                BUG();
        }

        /* at this point we either have a full stripe,
         * or we've read the full stripe from the drive.
         * recalculate the parity and write the new results.
         *
         * We're not allowed to add any new bios to the
         * bio list here, anyone else that wants to
         * change this stripe needs to do their own rmw.
         */
        spin_lock_irq(&rbio->bio_list_lock);
        set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
        spin_unlock_irq(&rbio->bio_list_lock);

        atomic_set(&rbio->bbio->error, 0);

        /*
         * now that we've set rmw_locked, run through the
         * bio list one last time and map the page pointers
         *
         * We don't cache full rbios because we're assuming
         * the higher layers are unlikely to use this area of
         * the disk again soon.  If they do use it again,
         * hopefully they will send another full bio.
         */
        index_rbio_pages(rbio);
        if (!rbio_is_full(rbio))
                cache_rbio_pages(rbio);
        else
                clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

        for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
                struct page *p;
                /* first collect one page from each data stripe */
                for (stripe = 0; stripe < nr_data; stripe++) {
                        p = page_in_rbio(rbio, stripe, pagenr, 0);
                        pointers[stripe] = kmap(p);
                }

                /* then add the parity stripe */
                p = rbio_pstripe_page(rbio, pagenr);
                SetPageUptodate(p);
                pointers[stripe++] = kmap(p);

                if (q_stripe != -1) {

                        /*
                         * raid6, add the qstripe and call the
                         * library function to fill in our p/q
                         */
                        p = rbio_qstripe_page(rbio, pagenr);
                        SetPageUptodate(p);
                        pointers[stripe++] = kmap(p);

                        raid6_call.gen_syndrome(bbio->num_stripes, PAGE_SIZE,
                                                pointers);
                } else {
                        /* raid5 */
                        memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
                        run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
                }

                for (stripe = 0; stripe < bbio->num_stripes; stripe++)
                        kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
        }

        /*
         * time to start writing.  Make bios for everything from the
         * higher layers (the bio_list in our rbio) and our p/q.  Ignore
         * everything else.
         */
        for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
                for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
                        struct page *page;
                        if (stripe < rbio->nr_data) {
                                page = page_in_rbio(rbio, stripe, pagenr, 1);
                                if (!page)
                                        continue;
                        } else {
                                page = rbio_stripe_page(rbio, stripe, pagenr);
                        }

                        ret = rbio_add_io_page(rbio, &bio_list,
                                       page, stripe, pagenr, rbio->stripe_len);
                        if (ret)
                                goto cleanup;
                }
        }

        atomic_set(&bbio->stripes_pending, bio_list_size(&bio_list));
        BUG_ON(atomic_read(&bbio->stripes_pending) == 0);

        while (1) {
                bio = bio_list_pop(&bio_list);
                if (!bio)
                        break;

                bio->bi_private = rbio;
                bio->bi_end_io = raid_write_end_io;
                BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
                submit_bio(WRITE, bio);
        }
        return;

cleanup:
        rbio_orig_end_io(rbio, -EIO, 0);
}
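/*
 * The parity math used above, for reference: with data blocks
 * D0 .. Dn-1, the raid5 branch computes
 *
 *	P = D0 ^ D1 ^ ... ^ Dn-1
 *
 * (the memcpy seeds P with D0 and run_xor folds in the rest), while
 * the raid6 branch has the pq library compute both P and
 *
 *	Q = g^0*D0 ^ g^1*D1 ^ ... ^ g^(n-1)*Dn-1
 *
 * over GF(2^8), which is what makes two-failure recovery possible.
 */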
/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
                           struct bio *bio)
{
        u64 physical = bio->bi_sector;
        u64 stripe_start;
        int i;
        struct btrfs_bio_stripe *stripe;

        physical <<= 9;

        for (i = 0; i < rbio->bbio->num_stripes; i++) {
                stripe = &rbio->bbio->stripes[i];
                stripe_start = stripe->physical;
                if (physical >= stripe_start &&
                    physical < stripe_start + rbio->stripe_len) {
                        return i;
                }
        }
        return -1;
}
/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
                                   struct bio *bio)
{
        u64 logical = bio->bi_sector;
        u64 stripe_start;
        int i;

        logical <<= 9;

        for (i = 0; i < rbio->nr_data; i++) {
                stripe_start = rbio->raid_map[i];
                if (logical >= stripe_start &&
                    logical < stripe_start + rbio->stripe_len) {
                        return i;
                }
        }
        return -1;
}
/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&rbio->bio_list_lock, flags);

        /* we already know this stripe is bad, move on */
        if (rbio->faila == failed || rbio->failb == failed)
                goto out;

        if (rbio->faila == -1) {
                /* first failure on this rbio */
                rbio->faila = failed;
                atomic_inc(&rbio->bbio->error);
        } else if (rbio->failb == -1) {
                /* second failure on this rbio */
                rbio->failb = failed;
                atomic_inc(&rbio->bbio->error);
        } else {
                ret = -EIO;
        }
out:
        spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

        return ret;
}
/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
                           struct bio *bio)
{
        int failed = find_bio_stripe(rbio, bio);

        if (failed < 0)
                return -EIO;

        return fail_rbio_index(rbio, failed);
}
/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
        int i;
        struct page *p;

        for (i = 0; i < bio->bi_vcnt; i++) {
                p = bio->bi_io_vec[i].bv_page;
                SetPageUptodate(p);
        }
}
/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio, int err)
{
        struct btrfs_raid_bio *rbio = bio->bi_private;

        if (err)
                fail_bio_stripe(rbio, bio);
        else
                set_bio_pages_uptodate(bio);

        bio_put(bio);

        if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
                return;

        err = 0;
        if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
                goto cleanup;

        /*
         * this will normally call finish_rmw to start our write
         * but if there are any failed stripes we'll reconstruct
         * from parity first
         */
        validate_rbio_for_rmw(rbio);
        return;

cleanup:
        rbio_orig_end_io(rbio, -EIO, 0);
}
static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
        rbio->work.flags = 0;
        rbio->work.func = rmw_work;

        btrfs_queue_worker(&rbio->fs_info->rmw_workers,
                           &rbio->work);
}

static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
        rbio->work.flags = 0;
        rbio->work.func = read_rebuild_work;

        btrfs_queue_worker(&rbio->fs_info->rmw_workers,
                           &rbio->work);
}
/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
        int bios_to_read = 0;
        struct btrfs_bio *bbio = rbio->bbio;
        struct bio_list bio_list;
        int ret;
        int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        int pagenr;
        int stripe;
        struct bio *bio;

        bio_list_init(&bio_list);

        ret = alloc_rbio_pages(rbio);
        if (ret)
                goto cleanup;

        index_rbio_pages(rbio);

        atomic_set(&rbio->bbio->error, 0);
        /*
         * build a list of bios to read all the missing parts of this
         * stripe
         */
        for (stripe = 0; stripe < rbio->nr_data; stripe++) {
                for (pagenr = 0; pagenr < nr_pages; pagenr++) {
                        struct page *page;
                        /*
                         * we want to find all the pages missing from
                         * the rbio and read them from the disk.  If
                         * page_in_rbio finds a page in the bio list
                         * we don't need to read it off the stripe.
                         */
                        page = page_in_rbio(rbio, stripe, pagenr, 1);
                        if (page)
                                continue;

                        page = rbio_stripe_page(rbio, stripe, pagenr);
                        /*
                         * the bio cache may have handed us an uptodate
                         * page.  If so, be happy and use it
                         */
                        if (PageUptodate(page))
                                continue;

                        ret = rbio_add_io_page(rbio, &bio_list, page,
                                       stripe, pagenr, rbio->stripe_len);
                        if (ret)
                                goto cleanup;
                }
        }

        bios_to_read = bio_list_size(&bio_list);
        if (!bios_to_read) {
                /*
                 * this can happen if others have merged with
                 * us, it means there is nothing left to read.
                 * But if there are missing devices it may not be
                 * safe to do the full stripe write yet.
                 */
                goto finish;
        }

        /*
         * the bbio may be freed once we submit the last bio.  Make sure
         * not to touch it after that
         */
        atomic_set(&bbio->stripes_pending, bios_to_read);
        while (1) {
                bio = bio_list_pop(&bio_list);
                if (!bio)
                        break;

                bio->bi_private = rbio;
                bio->bi_end_io = raid_rmw_end_io;

                btrfs_bio_wq_end_io(rbio->fs_info, bio,
                                    BTRFS_WQ_ENDIO_RAID56);

                BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
                submit_bio(READ, bio);
        }
        /* the actual write will happen once the reads are done */
        return 0;

cleanup:
        rbio_orig_end_io(rbio, -EIO, 0);
        return -EIO;

finish:
        validate_rbio_for_rmw(rbio);
        return 0;
}
/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
        int ret;

        ret = alloc_rbio_parity_pages(rbio);
        if (ret)
                return ret;

        ret = lock_stripe_add(rbio);
        if (ret == 0)
                finish_rmw(rbio);
        return 0;
}
/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
        int ret;

        ret = lock_stripe_add(rbio);
        if (ret == 0)
                async_rmw_stripe(rbio);
        return 0;
}
/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
        /* head off into rmw land if we don't have a full stripe */
        if (!rbio_is_full(rbio))
                return partial_stripe_write(rbio);
        return full_stripe_write(rbio);
}
/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
        struct blk_plug_cb cb;
        struct btrfs_fs_info *info;
        struct list_head rbio_list;
        struct btrfs_work work;
};
/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
                                                 plug_list);
        struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
                                                 plug_list);
        u64 a_sector = ra->bio_list.head->bi_sector;
        u64 b_sector = rb->bio_list.head->bi_sector;

        if (a_sector < b_sector)
                return -1;
        if (a_sector > b_sector)
                return 1;
        return 0;
}
static void run_plug(struct btrfs_plug_cb *plug)
{
        struct btrfs_raid_bio *cur;
        struct btrfs_raid_bio *last = NULL;

        /*
         * sort our plug list then try to merge
         * everything we can in hopes of creating full
         * stripes.
         */
        list_sort(NULL, &plug->rbio_list, plug_cmp);
        while (!list_empty(&plug->rbio_list)) {
                cur = list_entry(plug->rbio_list.next,
                                 struct btrfs_raid_bio, plug_list);
                list_del_init(&cur->plug_list);

                if (rbio_is_full(cur)) {
                        /* we have a full stripe, send it down */
                        full_stripe_write(cur);
                        continue;
                }
                if (last) {
                        if (rbio_can_merge(last, cur)) {
                                merge_rbio(last, cur);
                                __free_raid_bio(cur);
                                continue;
                        }
                        __raid56_parity_write(last);
                }
                last = cur;
        }
        if (last)
                __raid56_parity_write(last);
        kfree(plug);
}
/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
        struct btrfs_plug_cb *plug;
        plug = container_of(work, struct btrfs_plug_cb, work);
        run_plug(plug);
}
static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
        struct btrfs_plug_cb *plug;
        plug = container_of(cb, struct btrfs_plug_cb, cb);

        if (from_schedule) {
                plug->work.flags = 0;
                plug->work.func = unplug_work;
                btrfs_queue_worker(&plug->info->rmw_workers,
                                   &plug->work);
                return;
        }
        run_plug(plug);
}
/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
                        struct btrfs_bio *bbio, u64 *raid_map,
                        u64 stripe_len)
{
        struct btrfs_raid_bio *rbio;
        struct btrfs_plug_cb *plug = NULL;
        struct blk_plug_cb *cb;

        rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
        if (IS_ERR(rbio))
                return PTR_ERR(rbio);

        bio_list_add(&rbio->bio_list, bio);
        rbio->bio_list_bytes = bio->bi_size;

        /*
         * don't plug on full rbios, just get them out the door
         * as quickly as we can
         */
        if (rbio_is_full(rbio))
                return full_stripe_write(rbio);

        cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
                               sizeof(*plug));
        if (cb) {
                plug = container_of(cb, struct btrfs_plug_cb, cb);
                if (!plug->info) {
                        plug->info = root->fs_info;
                        INIT_LIST_HEAD(&plug->rbio_list);
                }
                list_add_tail(&rbio->plug_list, &plug->rbio_list);
        } else {
                return __raid56_parity_write(rbio);
        }
        return 0;
}
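/*
 * Sketch of the expected caller, assuming the RAID5/6 mapping path in
 * volumes.c: btrfs_map_bio() builds the bbio and the sorted raid_map
 * for the full stripe and then hands them to us with
 *
 *	raid56_parity_write(root, bio, bbio, raid_map, stripe_len);
 *
 * transferring ownership of bbio and raid_map, which are freed when
 * the rbio is freed.
 */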
/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
        int pagenr, stripe;
        void **pointers;
        int faila = -1, failb = -1;
        int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        struct page *page;
        int err;
        int i;

        pointers = kzalloc(rbio->bbio->num_stripes * sizeof(void *),
                           GFP_NOFS);
        if (!pointers) {
                err = -ENOMEM;
                goto cleanup_io;
        }

        faila = rbio->faila;
        failb = rbio->failb;

        if (rbio->read_rebuild) {
                spin_lock_irq(&rbio->bio_list_lock);
                set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
                spin_unlock_irq(&rbio->bio_list_lock);
        }

        index_rbio_pages(rbio);

        for (pagenr = 0; pagenr < nr_pages; pagenr++) {
                /* setup our array of pointers with pages
                 * from each stripe
                 */
                for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
                        /*
                         * if we're rebuilding a read, we have to use
                         * pages from the bio list
                         */
                        if (rbio->read_rebuild &&
                            (stripe == faila || stripe == failb)) {
                                page = page_in_rbio(rbio, stripe, pagenr, 0);
                        } else {
                                page = rbio_stripe_page(rbio, stripe, pagenr);
                        }
                        pointers[stripe] = kmap(page);
                }

                /* all raid6 handling here */
                if (rbio->raid_map[rbio->bbio->num_stripes - 1] ==
                    RAID6_Q_STRIPE) {

                        /*
                         * single failure, rebuild from parity raid5
                         * style
                         */
                        if (failb < 0) {
                                if (faila == rbio->nr_data) {
                                        /*
                                         * Just the P stripe has failed, without
                                         * a bad data or Q stripe.
                                         * TODO, we should redo the xor here.
                                         */
                                        err = -EIO;
                                        goto cleanup;
                                }
                                /*
                                 * a single failure in raid6 is rebuilt
                                 * in the pstripe code below
                                 */
                                goto pstripe;
                        }

                        /* make sure our ps and qs are in order */
                        if (faila > failb) {
                                int tmp = failb;
                                failb = faila;
                                faila = tmp;
                        }

                        /* if the q stripe is failed, do a pstripe reconstruction
                         * from the xors.
                         * If both the q stripe and the P stripe are failed, we're
                         * here due to a crc mismatch and we can't give them the
                         * data they want
                         */
                        if (rbio->raid_map[failb] == RAID6_Q_STRIPE) {
                                if (rbio->raid_map[faila] == RAID5_P_STRIPE) {
                                        err = -EIO;
                                        goto cleanup;
                                }
                                /*
                                 * otherwise we have one bad data stripe and
                                 * a good P stripe.  raid5!
                                 */
                                goto pstripe;
                        }

                        if (rbio->raid_map[failb] == RAID5_P_STRIPE) {
                                raid6_datap_recov(rbio->bbio->num_stripes,
                                                  PAGE_SIZE, faila, pointers);
                        } else {
                                raid6_2data_recov(rbio->bbio->num_stripes,
                                                  PAGE_SIZE, faila, failb,
                                                  pointers);
                        }
                } else {
                        void *p;

                        /* rebuild from P stripe here (raid5 or raid6) */
                        BUG_ON(failb != -1);
pstripe:
                        /* Copy parity block into failed block to start with */
                        memcpy(pointers[faila],
                               pointers[rbio->nr_data],
                               PAGE_CACHE_SIZE);

                        /* rearrange the pointer array */
                        p = pointers[faila];
                        for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
                                pointers[stripe] = pointers[stripe + 1];
                        pointers[rbio->nr_data - 1] = p;

                        /* xor in the rest */
                        run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
                }
                /* if we're doing this rebuild as part of an rmw, go through
                 * and set all of our private rbio pages in the
                 * failed stripes as uptodate.  This way finish_rmw will
                 * know they can be trusted.  If this was a read reconstruction,
                 * other endio functions will fiddle the uptodate bits
                 */
                if (!rbio->read_rebuild) {
                        for (i = 0; i < nr_pages; i++) {
                                if (faila != -1) {
                                        page = rbio_stripe_page(rbio, faila, i);
                                        SetPageUptodate(page);
                                }
                                if (failb != -1) {
                                        page = rbio_stripe_page(rbio, failb, i);
                                        SetPageUptodate(page);
                                }
                        }
                }
                for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
                        /*
                         * if we're rebuilding a read, we have to use
                         * pages from the bio list
                         */
                        if (rbio->read_rebuild &&
                            (stripe == faila || stripe == failb)) {
                                page = page_in_rbio(rbio, stripe, pagenr, 0);
                        } else {
                                page = rbio_stripe_page(rbio, stripe, pagenr);
                        }
                        kunmap(page);
                }
        }

        err = 0;
cleanup:
        kfree(pointers);

cleanup_io:

        if (rbio->read_rebuild) {
                if (err == 0)
                        cache_rbio_pages(rbio);
                else
                        clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

                rbio_orig_end_io(rbio, err, err == 0);
        } else if (err == 0) {
                rbio->faila = -1;
                rbio->failb = -1;
                finish_rmw(rbio);
        } else {
                rbio_orig_end_io(rbio, err, 0);
        }
}
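/*
 * Worked example of the pstripe path above, for nr_data == 3 and
 * faila == 1 (one bad data stripe, good P): pointers[] starts as
 * [D0, D1, D2, P] where the D1 page is the one being rebuilt.  The
 * memcpy seeds the D1 page with P's contents, the rotation loop
 * shifts the survivors down so pointers[] becomes [D0, D2, D1, P],
 * and run_xor(pointers, 2, ...) computes pointers[2] ^= D0 ^ D2,
 * leaving D1 = P ^ D0 ^ D2 in the failed stripe's page.
 */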
/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio, int err)
{
        struct btrfs_raid_bio *rbio = bio->bi_private;

        /*
         * we only read stripe pages off the disk, set them
         * up to date if there were no errors
         */
        if (err)
                fail_bio_stripe(rbio, bio);
        else
                set_bio_pages_uptodate(bio);
        bio_put(bio);

        if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
                return;

        if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
                rbio_orig_end_io(rbio, -EIO, 0);
        else
                __raid_recover_end_io(rbio);
}
/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
        int bios_to_read = 0;
        struct btrfs_bio *bbio = rbio->bbio;
        struct bio_list bio_list;
        int ret;
        int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        int pagenr;
        int stripe;
        struct bio *bio;

        bio_list_init(&bio_list);

        ret = alloc_rbio_pages(rbio);
        if (ret)
                goto cleanup;

        atomic_set(&rbio->bbio->error, 0);

        /*
         * read everything that hasn't failed.  Thanks to the
         * stripe cache, it is possible that some or all of these
         * pages are going to be uptodate.
         */
        for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
                if (rbio->faila == stripe ||
                    rbio->failb == stripe)
                        continue;

                for (pagenr = 0; pagenr < nr_pages; pagenr++) {
                        struct page *p;

                        /*
                         * the rmw code may have already read this
                         * page in
                         */
                        p = rbio_stripe_page(rbio, stripe, pagenr);
                        if (PageUptodate(p))
                                continue;

                        ret = rbio_add_io_page(rbio, &bio_list,
                                       rbio_stripe_page(rbio, stripe, pagenr),
                                       stripe, pagenr, rbio->stripe_len);
                        if (ret < 0)
                                goto cleanup;
                }
        }

        bios_to_read = bio_list_size(&bio_list);
        if (!bios_to_read) {
                /*
                 * we might have no bios to read just because the pages
                 * were up to date, or we might have no bios to read because
                 * the devices were gone.
                 */
                if (atomic_read(&rbio->bbio->error) <= rbio->bbio->max_errors) {
                        __raid_recover_end_io(rbio);
                        goto out;
                } else {
                        goto cleanup;
                }
        }

        /*
         * the bbio may be freed once we submit the last bio.  Make sure
         * not to touch it after that
         */
        atomic_set(&bbio->stripes_pending, bios_to_read);
        while (1) {
                bio = bio_list_pop(&bio_list);
                if (!bio)
                        break;

                bio->bi_private = rbio;
                bio->bi_end_io = raid_recover_end_io;

                btrfs_bio_wq_end_io(rbio->fs_info, bio,
                                    BTRFS_WQ_ENDIO_RAID56);

                BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
                submit_bio(READ, bio);
        }
out:
        return 0;

cleanup:
        if (rbio->read_rebuild)
                rbio_orig_end_io(rbio, -EIO, 0);
        return -EIO;
}
/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
                          struct btrfs_bio *bbio, u64 *raid_map,
                          u64 stripe_len, int mirror_num)
{
        struct btrfs_raid_bio *rbio;
        int ret;

        rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
        if (IS_ERR(rbio))
                return PTR_ERR(rbio);

        rbio->read_rebuild = 1;
        bio_list_add(&rbio->bio_list, bio);
        rbio->bio_list_bytes = bio->bi_size;

        rbio->faila = find_logical_bio_stripe(rbio, bio);
        if (rbio->faila == -1) {
                BUG();
                kfree(rbio);
                return -EIO;
        }

        /*
         * reconstruct from the q stripe if they are
         * asking for mirror 3
         */
        if (mirror_num == 3)
                rbio->failb = bbio->num_stripes - 2;

        ret = lock_stripe_add(rbio);

        /*
         * __raid56_parity_recover will end the bio with
         * any errors it hits.  We don't want to return
         * its error value up the stack because our caller
         * will end up calling bio_endio with any nonzero
         * return
         */
        if (ret == 0)
                __raid56_parity_recover(rbio);
        /*
         * our rbio has been added to the list of
         * rbios that will be handled after the
         * current lock owner is done
         */
        return 0;
}
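/*
 * mirror_num glossary for the recovery path above: the read path uses
 * mirror 1 for a plain read, mirror 2 to force a rebuild from parity
 * (faila is the data stripe the bio maps to), and mirror 3 on raid6
 * to additionally mark the P stripe (bbio->num_stripes - 2) as bad so
 * the data is reconstructed from the Q stripe instead.
 */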
static void rmw_work(struct btrfs_work *work)
{
        struct btrfs_raid_bio *rbio;

        rbio = container_of(work, struct btrfs_raid_bio, work);
        raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
        struct btrfs_raid_bio *rbio;

        rbio = container_of(work, struct btrfs_raid_bio, work);
        __raid56_parity_recover(rbio);
}