/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/vmalloc.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE	= 0,
	BTRFS_RBIO_READ_REBUILD	= 1,
	BTRFS_RBIO_PARITY_SCRUB	= 2,
};
struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	int real_stripes;

	int stripe_npages;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	int scrubp;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	atomic_t refs;

	atomic_t stripes_pending;

	atomic_t error;

	/*
	 * these are two arrays of pointers.  We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;
};
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void async_scrub_parity(struct btrfs_raid_bio *rbio);
/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!table) {
		table = vzalloc(table_size);
		if (!table)
			return -ENOMEM;
	}

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
		init_waitqueue_head(&cur->wait);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x) {
		if (is_vmalloc_addr(x))
			vfree(x);
		else
			kfree(x);
	}
	return 0;
}
/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		memcpy(d, s, PAGE_CACHE_SIZE);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}
/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
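
/*
 * Editor's sketch (not in the original source): why the >> 16 above
 * matters.  Full-stripe starts are far apart in byte units, so raw
 * addresses fed to hash_64() keep their low 16 bits permanently zero.
 * With hypothetical stripe starts a and b:
 *
 *	u64 a = 64ULL * 1024 * 1024;	// 64M
 *	u64 b = a + 64 * 1024;		// next 64K stripe
 *	int ba = hash_64(a >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
 *	int bb = hash_64(b >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
 *
 * After the shift, a >> 16 and b >> 16 are consecutive integers and
 * hash_64 spreads them evenly across the buckets.
 */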
/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s)) {
			continue;
		}

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}
/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}
/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				atomic_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}
/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	if (is_vmalloc_addr(info->stripe_hash_table))
		vfree(info->stripe_hash_table);
	else
		kfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}
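
/*
 * Editor's note (not in the original source): the is_vmalloc_addr()
 * branch above pairs with the kzalloc/vzalloc fallback in
 * btrfs_alloc_stripe_hash_table().  Later kernels provide kvfree(),
 * which folds this pattern into one call; a minimal sketch:
 *
 *	kvfree(info->stripe_hash_table);
 *	info->stripe_hash_table = NULL;
 */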
/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		atomic_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
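
/*
 * Editor's sketch (not in the original source): how run_xor() is used
 * for RAID5 parity.  xor_blocks() XORs the source buffers into dest,
 * so the caller seeds dest with the first data block and XORs in the
 * rest; with nr_data data pointers followed by the parity pointer:
 *
 *	memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
 *	run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
 *
 * leaving P = D0 ^ D1 ^ ... ^ D(nr_data-1) in pointers[nr_data], which
 * is exactly the pattern finish_rmw() uses below.
 */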
/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a time when you know it is impossible to add
 * new bios into the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;

	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	return ret;
}

static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	ret = __rbio_is_full(rbio);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
	return ret;
}
/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbio's though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * a parity scrub needs to read the full stripe from the drive,
	 * then check and repair the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
	    cur->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	return 1;
}
/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
	return rbio->stripe_pages[index];
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;

	index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
		PAGE_CACHE_SHIFT;
	return rbio->stripe_pages[index];
}
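
/*
 * Editor's sketch (not in the original source): the arithmetic above
 * in page units.  Assuming a hypothetical 64K stripe_len, 4K pages and
 * nr_data = 2 (RAID5 on 3 devices), each stripe spans 16 pages, so:
 *
 *	rbio_pstripe_page(rbio, 0)  ->  stripe_pages[2 * 16 + 0] = [32]
 *	rbio_qstripe_page(rbio, 0)  ->  NULL (no Q stripe on RAID5)
 *
 * On RAID6 the Q pages would start one stripe later, at index 48.
 */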
/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				atomic_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}

			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started when the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	atomic_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}
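
/*
 * Editor's sketch (not in the original source): the caller contract of
 * lock_stripe_add(), as the write paths below use it:
 *
 *	if (lock_stripe_add(rbio) == 0) {
 *		// we own the stripe lock; kick off the IO ourselves
 *		raid56_rmw_stripe(rbio);
 *	}
 *	// on 1, the rbio was merged or queued on the plug list and
 *	// may already be freed; it must not be touched again
 */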
/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		atomic_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			atomic_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				async_read_rebuild(next);
			else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				async_rmw_stripe(next);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				async_scrub_parity(next);
			}

			goto done_nolock;
		} else if (waitqueue_active(&h->wait)) {
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);
			wake_up(&h->wait);
			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}
static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	WARN_ON(atomic_read(&rbio->refs) < 0);
	if (!atomic_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	unlock_stripe(rbio);
	__free_raid_bio(rbio);
}
/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *next;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	free_raid_bio(rbio);

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		if (uptodate)
			set_bit(BIO_UPTODATE, &cur->bi_flags);
		bio_endio(cur, err);
		cur = next;
	}
}
/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio, int err)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = 0;

	/* OK, we have written all the stripes we need to. */
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		err = -EIO;

	rbio_orig_end_io(rbio, err, 0);
}
/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}
/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	unsigned long nr = stripe_len * nr_stripes;
	return DIV_ROUND_UP(nr, PAGE_CACHE_SIZE);
}
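
/*
 * Editor's sketch (not in the original source): worked numbers for the
 * helper above.  With a hypothetical 64K stripe_len on a 3-device
 * RAID5 (2 data + 1 parity) and 4K pages:
 *
 *	rbio_nr_pages(65536, 3) == DIV_ROUND_UP(196608, 4096) == 48
 *
 * i.e. 16 pages per stripe, parity included.
 */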
/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
					 struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG / 8),
			GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = root->fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	atomic_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages and bio_pages array point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
	rbio->stripe_pages = p;
	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}
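
/*
 * Editor's sketch (not in the original source): the single kzalloc()
 * above lays the trailing arrays out contiguously behind the struct:
 *
 *	[struct btrfs_raid_bio]
 *	[stripe_pages: num_pages * sizeof(struct page *)]
 *	[bio_pages:    num_pages * sizeof(struct page *)]
 *	[dbitmap:      DIV_ROUND_UP(stripe_npages, BITS_PER_LONG / 8) bytes]
 *
 * which is why stripe_pages, bio_pages and dbitmap are all derived
 * from the same base pointer p with byte offsets.
 */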
/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
		ClearPageUptodate(page);
	}
	return 0;
}

/* allocate pages for just the p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}
/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    test_bit(BIO_UPTODATE, &last->bi_flags) &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
			if (ret == PAGE_CACHE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_size = 0;
	bio->bi_bdev = stripe->dev->bdev;
	bio->bi_iter.bi_sector = disk_start >> 9;
	set_bit(BIO_UPTODATE, &bio->bi_flags);

	bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}
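
/*
 * Editor's sketch (not in the original source): the merge test above
 * in concrete numbers.  The tail bio's end offset is
 * (bi_sector << 9) + bi_size; when that equals disk_start on the same
 * bdev, the new page is physically contiguous and bio_add_page()
 * simply grows that bio.  E.g. a tail bio at sector 2048 holding 8K:
 *
 *	last_end = (2048 << 9) + 8192 = 1056768
 *
 * so a page whose disk_start is 1056768 merges; anything else gets a
 * fresh bio.
 */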
/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page)
{
	int index;
	index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT);
	index += page;
	return rbio->stripe_pages[index];
}
/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;
	struct page *p;
	int i;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_CACHE_SHIFT;

		for (i = 0; i < bio->bi_vcnt; i++) {
			p = bio->bi_io_vec[i].bv_page;
			rbio->bio_pages[page_index + i] = p;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}
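
/*
 * Editor's sketch (not in the original source): the indexing above.
 * raid_map[0] is the logical start of the full stripe, so a bio whose
 * bi_sector puts it 24K past that start lands at page_index 6 (with
 * 4K pages), and its pages fill bio_pages[6], bio_pages[7], ... in
 * order.  page_in_rbio() later resolves (stripe, pagenr) to the same
 * flat index to prefer these pages over the rbio's own stripe_pages.
 */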
/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	int stripe_len = rbio->stripe_len;
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int pages_per_stripe = stripe_len >> PAGE_CACHE_SHIFT;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
		struct page *p;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
		}

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
		submit_bio(WRITE, bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO, 0);
}
/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    bio->bi_bdev == stripe->dev->bdev) {
			return i;
		}
	}
	return -1;
}

/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}
/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}
/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	int i;
	struct page *p;

	for (i = 0; i < bio->bi_vcnt; i++) {
		p = bio->bi_io_vec[i].bv_page;
		SetPageUptodate(p);
	}
}

/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio, int err)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (err)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = 0;
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO, 0);
}
static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			rmw_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			read_rebuild_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}
/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < nr_pages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
		submit_bio(READ, bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, -EIO, 0);
	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}
/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		async_rmw_stripe(rbio);
	return 0;
}
/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}
/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}
static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* we have a full stripe, send it down */
			full_stripe_write(cur);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last) {
		__raid56_parity_write(last);
	}
	kfree(plug);
}

/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}
/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(root->fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
			       sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = root->fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
	}
	return ret;
}
/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
	struct page *page;
	int err;
	int i;

	pointers = kzalloc(rbio->real_stripes * sizeof(void *),
			   GFP_NOFS);
	if (!pointers) {
		err = -ENOMEM;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < nr_pages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = -EIO;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe is failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe are failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = -EIO;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			memcpy(pointers[faila],
			       pointers[rbio->nr_data],
			       PAGE_CACHE_SIZE);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
		}
		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < nr_pages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = 0;
cleanup:
	kfree(pointers);

cleanup_io:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		if (err == 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err, err == 0);
	} else if (err == 0) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err, 0);
	}
}
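
/*
 * Editor's sketch (not in the original source): the pstripe pointer
 * rotation above, worked for nr_data = 4 and faila = 1.  After the
 * memcpy, the failed page's buffer holds a copy of P, and the loop
 * rotates that buffer to the end of the data pointers:
 *
 *	before:  [D0, P-copy, D2, D3, P]
 *	after:   [D0, D2, D3, P-copy, P]
 *
 * run_xor(pointers, nr_data - 1, ...) then XORs D0, D2 and D3 into
 * the P copy; since P = D0^D1^D2^D3, the buffer ends up holding D1,
 * in place, exactly where the failed page was mapped.
 */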
/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio, int err)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (err)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, -EIO, 0);
	else
		__raid_recover_end_io(rbio);
}
/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < nr_pages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
		submit_bio(READ, bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD)
		rbio_orig_end_io(rbio, -EIO, 0);
	return -EIO;
}
/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(root->fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * reconstruct from the q stripe if they are
	 * asking for mirror 3
	 */
	if (mirror_num == 3)
		rbio->failb = rbio->real_stripes - 2;

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}
static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}
/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Note: We need to make sure all the pages that are added to the
 * scrub/replace raid bio are correct and are not changed during the
 * scrub/replace.  That is, those pages just hold metadata or file data
 * with checksum.
 */

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	for (i = 0; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}

	/* for now we only support the case where sectorsize equals page size */
	ASSERT(root->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	return rbio;
}
void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
				   struct page *page, u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_CACHE_SHIFT;
	rbio->bio_pages[index] = page;
}
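
/*
 * Editor's sketch (not in the original source): the index math above.
 * logical is an address inside the data portion of the full stripe,
 * so with raid_map[0] = 1G and a page at logical = 1G + 40K (4K pages):
 *
 *	stripe_offset = 40960
 *	index         = 40960 >> PAGE_CACHE_SHIFT = 10
 *
 * and the page is dropped into bio_pages[10], the same flat layout
 * that index_rbio_pages() builds for the other rbio types.
 */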
/*
 * We only scrub the parity for rows where we have correct data on the
 * same horizontal stripe, so we don't need to allocate pages for every
 * stripe.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	struct page *page;
	int index;
	int bit;
	int i;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
			ClearPageUptodate(page);
		}
	}
	return 0;
}
/*
 * end io function used by finish_parity_scrub.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_parity_end_io(struct bio *bio, int err)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = 0;

	if (atomic_read(&rbio->error))
		err = -EIO;

	rbio_orig_end_io(rbio, err, 0);
}
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe, pagenr, ret;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * The higher layers (the scrubber) are unlikely to use this area
	 * of the disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
		}

		/* Check scrubbed parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, 0, 0);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_parity_end_io;
		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
		submit_bio(WRITE, bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO, 0);
}
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}
/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * We can't use the parity being scrubbed to repair data,
		 * so our repair capability is reduced by one.
		 * (In the case of RAID5 we can't repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good, only the parity is wrong; just
		 * repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Getting here means we have one corrupted data stripe and
		 * one corrupted parity on RAID6.  If the corrupted parity
		 * is the one being scrubbed, we can luckily use the other
		 * parity to repair the data; otherwise the data stripe
		 * can't be repaired.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO, 0);
}
/*
 * end io for the read phase of the scrub/replace cycle.  All the bios here
 * are physical stripe bios we've read from the disk so we can recalculate
 * the parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio, int err)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (err)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our write,
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}
static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	bio_list_init(&bio_list);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
		submit_bio(READ, bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO, 0);
	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}
static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}