/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#define dprintk(x...) ((void)(DEBUG && printk(x)))

static void autostart_arrays(int part);

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */
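/*
 * Example (editor's sketch, not part of the original source): raising
 * the guaranteed resync floor to 50 MB/sec, system-wide or for one
 * array ("md0" is an illustrative name), using the paths above:
 *
 *	echo 50000 > /proc/sys/dev/raid/speed_limit_min
 *	echo 50000 > /sys/block/md0/md/sync_speed_min
 */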
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;

static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};
static const struct block_device_operations md_fops;

static int start_readonly;
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
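/*
 * Example (editor's sketch, not part of the original source): per the
 * comment above, userspace can block until the event count changes by
 * select()ing on /proc/mdstat with the fd in the exception set:
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	fd_set fds;
 *	FD_ZERO(&fds);
 *	FD_SET(fd, &fds);
 *	select(fd + 1, NULL, NULL, &fds, NULL);   (returns on next event)
 */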
/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev, tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
	     )
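/*
 * Example (editor's sketch, not part of the original source): a
 * typical walk over every array; the macro handles the locking and
 * the mddev refcounts itself, as md_print_devices() further down
 * also demonstrates:
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		printk(KERN_INFO "md: saw %s\n", mdname(mddev));
 */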
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	int rv;
	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return 0;
	}
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();
	rv = mddev->pers->make_request(q, bio);
	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);

	return rv;
}
static void mddev_suspend(mddev_t *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	/* we now know that no code is executing in the personality module,
	 * except possibly the tail end of a ->bi_end_io function, but that
	 * is certain to complete before the module has a chance to get
	 * unloaded.
	 */
}

static void mddev_resume(mddev_t *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);
}
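/*
 * Example (editor's sketch, not part of the original source): a
 * reconfiguration step would typically be bracketed by the pair above
 * so that no personality code runs while fields are changed:
 *
 *	mddev_suspend(mddev);	(drains active_io, quiesces the array)
 *	... modify mddev ...
 *	mddev_resume(mddev);
 */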
int mddev_congested(mddev_t *mddev, int bits)
{
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);
static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    !mddev->hold_active) {
		list_del(&mddev->all_mddevs);
		if (mddev->gendisk) {
			/* we did a probe so need to clean up.
			 * Call schedule_work inside the spinlock
			 * so that flush_scheduled_work() after
			 * mddev_find will succeed in waiting for the
			 * work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			schedule_work(&mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
}
static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}
		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}
			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mutex_init(&new->open_mutex);
	mutex_init(&new->reconfig_mutex);
	mutex_init(&new->bitmap_mutex);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	atomic_set(&new->openers, 0);
	atomic_set(&new->active_io, 0);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);
	init_waitqueue_head(&new->recovery_wait);
	new->reshape_position = MaxSector;
	new->resync_min = 0;
	new->resync_max = MaxSector;
	new->level = LEVEL_NONE;

	goto retry;
}
static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}
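/*
 * Example (editor's sketch, not part of the original source): the
 * usual pattern for code needing the reconfiguration mutex:
 *
 *	if (mddev_lock(mddev))
 *		return -EINTR;	(lock is interruptible, so may fail)
 *	... touch fields protected by reconfig_mutex ...
 *	mddev_unlock(mddev);	(also pokes the md thread)
 */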
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}
/* return the offset of the super block in 512-byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t num_sectors = bdev->bd_inode->i_size / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}
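/*
 * Worked example (editor's note, assuming the usual
 * MD_RESERVED_SECTORS of 128, i.e. 64KB, from md_p.h): a device of
 * 1000000 sectors gives (1000000 & ~127) - 128 = 999936 - 128 =
 * 999808, so the 0.90 superblock sits in the last 64-128KB of the
 * device, aligned to a 64KB boundary.
 */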
static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
}
static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}
static void super_written_barrier(struct bio *bio, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
	} else {
		bio_put(bio2);
		bio->bi_private = rdev;
		super_written(bio, error);
	}
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}
void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next ;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
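/*
 * Worked example (editor's note): md_csum_fold(0x12345678) computes
 * 0x5678 + 0x1234 = 0x68ac; the second fold adds the (here empty)
 * carry, so the result is 0x68ac, a value that fits in 16 bits.
 */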
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures. It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences). However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *      The first time, mddev->raid_disks will be 0, and data from
 *      dev should be merged in. Subsequent calls check that dev
 *      is new enough. Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Update the superblock for rdev with data in mddev
 *      This does not write to disc.
 *
 */
struct super_type {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};
/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(mddev_t *mddev)
{
	if (!mddev->bitmap_file && !mddev->bitmap_offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512-byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}

	rdev->sectors = rdev->sb_start;

	if (rdev->sectors < sb->size * 2 && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = sb->size * 2;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = mddev->default_bitmap_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position. We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync) {
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512-byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = rdev->bdev->bd_inode->i_size >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks = (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL )
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		if (rdev->recovery_offset > 0) {
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
			sb->recovery_offset =
				cpu_to_le64(rdev->recovery_offset);
		}
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	}
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);
/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (blk_get_integrity(mddev->gendisk))
		return 0; /* already registered */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		/*
		 * If at least one rdev is not integrity capable, we can not
		 * enable data integrity for the md device.
		 */
		if (!bdev_get_integrity(rdev->bdev))
			return -EINVAL;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
				rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
			bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
			mdname(mddev));
		return -EINVAL;
	}
	printk(KERN_NOTICE "md: data integrity on %s enabled\n",
		mdname(mddev));
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);
/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
	struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
		kobject_del(&rdev->kobj);
		goto fail;
	}
	rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled = 0;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	/* We need to delay this, otherwise we can deadlock when
	 * writing 'remove' to "dev/state". We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	schedule_work(&rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
		return err;
	}
	if (!shared)
		set_bit(AllReserved, &rdev->flags);
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
}
void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	printk(KERN_INFO);
	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);
}
static void print_sb_1(struct mdp_superblock_1 *sb)
{
	__u8 *uuid;

	uuid = sb->set_uuid;
	printk(KERN_INFO
	       "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
	       ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
	       "md:    Name: \"%s\" CT:%llu\n",
		le32_to_cpu(sb->major_version),
		le32_to_cpu(sb->feature_map),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->set_name,
		(unsigned long long)le64_to_cpu(sb->ctime)
		       & MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
	printk(KERN_INFO
	       "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
			" RO:%llu\n"
	       "md:     Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
			":%02x%02x%02x%02x%02x%02x\n"
	       "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
	       "md:         (MaxDev:%u) \n",
		le32_to_cpu(sb->level),
		(unsigned long long)le64_to_cpu(sb->size),
		le32_to_cpu(sb->raid_disks),
		le32_to_cpu(sb->layout),
		le32_to_cpu(sb->chunksize),
		(unsigned long long)le64_to_cpu(sb->data_offset),
		(unsigned long long)le64_to_cpu(sb->data_size),
		(unsigned long long)le64_to_cpu(sb->super_offset),
		(unsigned long long)le64_to_cpu(sb->recovery_offset),
		le32_to_cpu(sb->dev_number),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->devflags,
		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
		(unsigned long long)le64_to_cpu(sb->events),
		(unsigned long long)le64_to_cpu(sb->resync_offset),
		le32_to_cpu(sb->sb_csum),
		le32_to_cpu(sb->max_dev)
		);
}
static void print_rdev(mdk_rdev_t *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
			print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
			break;
		case 1:
			print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}

static void md_print_devices(void)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		list_for_each_entry(rdev, &mddev->disks, same_set)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		list_for_each_entry(rdev, &mddev->disks, same_set)
			print_rdev(rdev, mddev->major_version);
	}
	printk("md:	**********************************\n");
	printk("\n");
}
static void sync_sbs(mddev_t * mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;

	/* First make sure individual recovery_offsets are correct */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;
	}

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     (rdev->sb_events&1)==0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
static void md_update_sb(mddev_t * mddev, int force_change)
{
	mdk_rdev_t *rdev;
	int sync_req;
	int nospares = 0;

	mddev->utime = get_seconds();
	if (mddev->external)
		return;
repeat:
	spin_lock_irq(&mddev->write_lock);

	set_bit(MD_CHANGE_PENDING, &mddev->flags);
	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean<-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && (mddev->events & 1)
	    && mddev->events != 1)
		mddev->events--;
	else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
			/* .. if the array isn't clean, an 'even' event must also go
			 * to spares. */
			if ((mddev->events&1)==0)
				nospares = 0;
		} else {
			/* otherwise an 'odd' event must go to spares */
			if ((mddev->events&1))
				nospares = 0;
		}
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		if (!mddev->external)
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);

		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str. They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};
static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags)) {
		len+= sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags)) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	/* can write
	 *  faulty  - simulates an error
	 *  remove  - disconnects the device
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
	 *  blocked - sets the Blocked flag
	 *  -blocked - clears the Blocked flag
	 *  insync - sets Insync providing device isn't active
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
		err = 0;
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
			mddev_t *mddev = rdev->mddev;
			kick_rdev_from_array(rdev);
			if (mddev->pers)
				md_update_sb(mddev, 1);
			md_new_event(mddev);
			err = 0;
		}
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "blocked")) {
		set_bit(Blocked, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-blocked")) {
		clear_bit(Blocked, &rdev->flags);
		wake_up(&rdev->blocked_wait);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);

		err = 0;
	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
		set_bit(In_sync, &rdev->flags);
		err = 0;
	}
	if (!err && rdev->sysfs_state)
		sysfs_notify_dirent(rdev->sysfs_state);
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
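/*
 * Example (editor's sketch, not part of the original source): the
 * commands listed above are written from userspace, e.g. failing and
 * then removing a member ("md0"/"dev-sda1" are illustrative names):
 *
 *	echo faulty > /sys/block/md0/md/dev-sda1/state
 *	echo remove > /sys/block/md0/md/dev-sda1/state
 */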
static ssize_t
errors_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2224 slot_show(mdk_rdev_t *rdev, char *page)
2226 if (rdev->raid_disk < 0)
2227 return sprintf(page, "none\n");
2229 return sprintf(page, "%d\n", rdev->raid_disk);
2233 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2238 int slot = simple_strtoul(buf, &e, 10);
2239 if (strncmp(buf, "none", 4)==0)
2241 else if (e==buf || (*e && *e!= '\n'))
2243 if (rdev->mddev->pers && slot == -1) {
2244 /* Setting 'slot' on an active array requires also
2245 * updating the 'rd%d' link, and communicating
2246 * with the personality with ->hot_*_disk.
2247 * For now we only support removing
2248 * failed/spare devices. This normally happens automatically,
2249 * but not when the metadata is externally managed.
2251 if (rdev->raid_disk == -1)
2253 /* personality does all needed checks */
2254 if (rdev->mddev->pers->hot_add_disk == NULL)
2256 err = rdev->mddev->pers->
2257 hot_remove_disk(rdev->mddev, rdev->raid_disk);
2260 sprintf(nm, "rd%d", rdev->raid_disk);
2261 sysfs_remove_link(&rdev->mddev->kobj, nm);
2262 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2263 md_wakeup_thread(rdev->mddev->thread);
2264 } else if (rdev->mddev->pers) {
2266 /* Activating a spare .. or possibly reactivating
2267 * if we ever get bitmaps working here.
2270 if (rdev->raid_disk != -1)
2273 if (rdev->mddev->pers->hot_add_disk == NULL)
2276 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
2277 if (rdev2->raid_disk == slot)
2280 rdev->raid_disk = slot;
2281 if (test_bit(In_sync, &rdev->flags))
2282 rdev->saved_raid_disk = slot;
2284 rdev->saved_raid_disk = -1;
2285 err = rdev->mddev->pers->
2286 hot_add_disk(rdev->mddev, rdev);
2288 rdev->raid_disk = -1;
2291 sysfs_notify_dirent(rdev->sysfs_state);
2292 sprintf(nm, "rd%d", rdev->raid_disk);
2293 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
2295 "md: cannot register "
2297 nm, mdname(rdev->mddev));
2299 /* don't wakeup anyone, leave that to userspace. */
2301 if (slot >= rdev->mddev->raid_disks)
2303 rdev->raid_disk = slot;
2304 /* assume it is working */
2305 clear_bit(Faulty, &rdev->flags);
2306 clear_bit(WriteMostly, &rdev->flags);
2307 set_bit(In_sync, &rdev->flags);
2308 sysfs_notify_dirent(rdev->sysfs_state);
2314 static struct rdev_sysfs_entry rdev_slot =
2315 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
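/*
 * Illustrative usage (hypothetical names): place a device into raid
 * slot 2 of an externally managed array, or detach it from its slot:
 *
 *   echo 2    > /sys/block/md0/md/dev-sdb1/slot
 *   echo none > /sys/block/md0/md/dev-sdb1/slot
 */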
2318 offset_show(mdk_rdev_t *rdev, char *page)
2320 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2324 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2327 unsigned long long offset = simple_strtoull(buf, &e, 10);
2328 if (e==buf || (*e && *e != '\n'))
2330 if (rdev->mddev->pers && rdev->raid_disk >= 0)
2332 if (rdev->sectors && rdev->mddev->external)
2333 /* Must set offset before size, so overlap checks can be sane. */
2336 rdev->data_offset = offset;
2340 static struct rdev_sysfs_entry rdev_offset =
2341 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2344 rdev_size_show(mdk_rdev_t *rdev, char *page)
2346 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2349 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2351 /* check if two start/length pairs overlap */
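/* equivalently: the ranges [s1, s1+l1) and [s2, s2+l2) intersect iff
 *     s1 < s2 + l2  &&  s2 < s1 + l1
 * (a sketch of the condition, not necessarily the exact form used)
 */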
2359 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2361 unsigned long long blocks;
2364 if (strict_strtoull(buf, 10, &blocks) < 0)
2367 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2368 return -EINVAL; /* sector conversion overflow */
2371 if (new != blocks * 2)
2372 return -EINVAL; /* unsigned long long to sector_t overflow */
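/*
 * Worked example: buf = "1048576" (1 GiB expressed in 1K blocks)
 * yields *sectors == 2097152. The first check above rejects values
 * whose doubling would overflow 64 bits; the second rejects values
 * that no longer fit once narrowed to sector_t.
 */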
2379 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2381 mddev_t *my_mddev = rdev->mddev;
2382 sector_t oldsectors = rdev->sectors;
2385 if (strict_blocks_to_sectors(buf, &sectors) < 0)
2387 if (my_mddev->pers && rdev->raid_disk >= 0) {
2388 if (my_mddev->persistent) {
2389 sectors = super_types[my_mddev->major_version].
2390 rdev_size_change(rdev, sectors);
2393 } else if (!sectors)
2394 sectors = (rdev->bdev->bd_inode->i_size >> 9) -
2397 if (sectors < my_mddev->dev_sectors)
2398 return -EINVAL; /* component must fit device */
2400 rdev->sectors = sectors;
2401 if (sectors > oldsectors && my_mddev->external) {
2402 /* need to check that all other rdevs with the same ->bdev
2403 * do not overlap. We need to unlock the mddev to avoid
2404 * a deadlock. We have already changed rdev->sectors, and if
2405 * we have to change it back, we will have the lock again.
2409 struct list_head *tmp;
2411 mddev_unlock(my_mddev);
2412 for_each_mddev(mddev, tmp) {
2416 list_for_each_entry(rdev2, &mddev->disks, same_set)
2417 if (test_bit(AllReserved, &rdev2->flags) ||
2418 (rdev->bdev == rdev2->bdev &&
2420 overlaps(rdev->data_offset, rdev->sectors,
2426 mddev_unlock(mddev);
2432 mddev_lock(my_mddev);
2434 /* Someone else could have slipped in a size
2435 * change here, but doing so is just silly.
2436 * We put oldsectors back because we *know* it is
2437 * safe, and trust userspace not to race with itself.
2440 rdev->sectors = oldsectors;
2447 static struct rdev_sysfs_entry rdev_size =
2448 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
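/*
 * Illustrative usage (hypothetical names): component sizes are read
 * and written in 1K blocks, so reserving 512 MiB per member is:
 *
 *   echo 524288 > /sys/block/md0/md/dev-sda1/size
 */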
2450 static struct attribute *rdev_default_attrs[] = {
2459 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2461 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2462 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2463 mddev_t *mddev = rdev->mddev;
2469 rv = mddev ? mddev_lock(mddev) : -EBUSY;
2471 if (rdev->mddev == NULL)
2474 rv = entry->show(rdev, page);
2475 mddev_unlock(mddev);
2481 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2482 const char *page, size_t length)
2484 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2485 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2487 mddev_t *mddev = rdev->mddev;
2491 if (!capable(CAP_SYS_ADMIN))
2493 rv = mddev ? mddev_lock(mddev): -EBUSY;
2495 if (rdev->mddev == NULL)
2498 rv = entry->store(rdev, page, length);
2499 mddev_unlock(mddev);
2504 static void rdev_free(struct kobject *ko)
2506 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2509 static struct sysfs_ops rdev_sysfs_ops = {
2510 .show = rdev_attr_show,
2511 .store = rdev_attr_store,
2513 static struct kobj_type rdev_ktype = {
2514 .release = rdev_free,
2515 .sysfs_ops = &rdev_sysfs_ops,
2516 .default_attrs = rdev_default_attrs,
2520 * Import a device. If 'super_format' >= 0, then sanity check the superblock
2522 * mark the device faulty if:
2524 * - the device is nonexistent (zero size)
2525 * - the device has no valid superblock
2527 * a faulty rdev _never_ has rdev->sb set.
2529 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2531 char b[BDEVNAME_SIZE];
2536 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2538 printk(KERN_ERR "md: could not alloc mem for new device!\n");
2539 return ERR_PTR(-ENOMEM);
2542 if ((err = alloc_disk_sb(rdev)))
2545 err = lock_rdev(rdev, newdev, super_format == -2);
2549 kobject_init(&rdev->kobj, &rdev_ktype);
2552 rdev->saved_raid_disk = -1;
2553 rdev->raid_disk = -1;
2555 rdev->data_offset = 0;
2556 rdev->sb_events = 0;
2557 atomic_set(&rdev->nr_pending, 0);
2558 atomic_set(&rdev->read_errors, 0);
2559 atomic_set(&rdev->corrected_errors, 0);
2561 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2564 "md: %s has zero or unknown size, marking faulty!\n",
2565 bdevname(rdev->bdev,b));
2570 if (super_format >= 0) {
2571 err = super_types[super_format].
2572 load_super(rdev, NULL, super_minor);
2573 if (err == -EINVAL) {
2575 "md: %s does not have a valid v%d.%d "
2576 "superblock, not importing!\n",
2577 bdevname(rdev->bdev,b),
2578 super_format, super_minor);
2583 "md: could not read %s's sb, not importing!\n",
2584 bdevname(rdev->bdev,b));
2589 INIT_LIST_HEAD(&rdev->same_set);
2590 init_waitqueue_head(&rdev->blocked_wait);
2595 if (rdev->sb_page) {
2601 return ERR_PTR(err);
2605 * Check a full RAID array for plausibility
2609 static void analyze_sbs(mddev_t * mddev)
2612 mdk_rdev_t *rdev, *freshest, *tmp;
2613 char b[BDEVNAME_SIZE];
2616 rdev_for_each(rdev, tmp, mddev)
2617 switch (super_types[mddev->major_version].
2618 load_super(rdev, freshest, mddev->minor_version)) {
2626 "md: fatal superblock inconsistency in %s"
2627 " -- removing from array\n",
2628 bdevname(rdev->bdev,b));
2629 kick_rdev_from_array(rdev);
2633 super_types[mddev->major_version].
2634 validate_super(mddev, freshest);
2637 rdev_for_each(rdev, tmp, mddev) {
2638 if (rdev->desc_nr >= mddev->max_disks ||
2639 i > mddev->max_disks) {
2641 "md: %s: %s: only %d devices permitted\n",
2642 mdname(mddev), bdevname(rdev->bdev, b),
2644 kick_rdev_from_array(rdev);
2647 if (rdev != freshest)
2648 if (super_types[mddev->major_version].
2649 validate_super(mddev, rdev)) {
2650 printk(KERN_WARNING "md: kicking non-fresh %s"
2652 bdevname(rdev->bdev,b));
2653 kick_rdev_from_array(rdev);
2656 if (mddev->level == LEVEL_MULTIPATH) {
2657 rdev->desc_nr = i++;
2658 rdev->raid_disk = rdev->desc_nr;
2659 set_bit(In_sync, &rdev->flags);
2660 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
2661 rdev->raid_disk = -1;
2662 clear_bit(In_sync, &rdev->flags);
2667 static void md_safemode_timeout(unsigned long data);
2670 safe_delay_show(mddev_t *mddev, char *page)
2672 int msec = (mddev->safemode_delay*1000)/HZ;
2673 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2676 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2684 /* remove a period, and count digits after it */
2685 if (len >= sizeof(buf))
2687 strlcpy(buf, cbuf, sizeof(buf));
2688 for (i=0; i<len; i++) {
2690 if (isdigit(buf[i])) {
2695 } else if (buf[i] == '.') {
2700 if (strict_strtoul(buf, 10, &msec) < 0)
2702 msec = (msec * 1000) / scale;
2704 mddev->safemode_delay = 0;
2706 unsigned long old_delay = mddev->safemode_delay;
2707 mddev->safemode_delay = (msec*HZ)/1000;
2708 if (mddev->safemode_delay == 0)
2709 mddev->safemode_delay = 1;
2710 if (mddev->safemode_delay < old_delay)
2711 md_safemode_timeout((unsigned long)mddev);
2715 static struct md_sysfs_entry md_safe_delay =
2716 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
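/*
 * Illustrative usage: the value is seconds with up to millisecond
 * precision, so a 200ms safemode delay is:
 *
 *   echo 0.200 > /sys/block/md0/md/safe_mode_delay
 *
 * Writing 0 disables the timer; any non-zero value is rounded up to
 * at least one jiffy by the code above.
 */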
2719 level_show(mddev_t *mddev, char *page)
2721 struct mdk_personality *p = mddev->pers;
2723 return sprintf(page, "%s\n", p->name);
2724 else if (mddev->clevel[0])
2725 return sprintf(page, "%s\n", mddev->clevel);
2726 else if (mddev->level != LEVEL_NONE)
2727 return sprintf(page, "%d\n", mddev->level);
2733 level_store(mddev_t *mddev, const char *buf, size_t len)
2737 struct mdk_personality *pers;
2741 if (mddev->pers == NULL) {
2744 if (len >= sizeof(mddev->clevel))
2746 strncpy(mddev->clevel, buf, len);
2747 if (mddev->clevel[len-1] == '\n')
2749 mddev->clevel[len] = 0;
2750 mddev->level = LEVEL_NONE;
2754 /* request to change the personality. Need to ensure:
2755 * - array is not engaged in resync/recovery/reshape
2756 * - old personality can be suspended
2757 * - new personality can correctly access the array.
2760 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
2763 if (!mddev->pers->quiesce) {
2764 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
2765 mdname(mddev), mddev->pers->name);
2769 /* Now find the new personality */
2770 if (len == 0 || len >= sizeof(level))
2772 strncpy(level, buf, len);
2773 if (level[len-1] == '\n')
2777 request_module("md-%s", level);
2778 spin_lock(&pers_lock);
2779 pers = find_pers(LEVEL_NONE, level);
2780 if (!pers || !try_module_get(pers->owner)) {
2781 spin_unlock(&pers_lock);
2782 printk(KERN_WARNING "md: personality %s not loaded\n", level);
2785 spin_unlock(&pers_lock);
2787 if (pers == mddev->pers) {
2788 /* Nothing to do! */
2789 module_put(pers->owner);
2792 if (!pers->takeover) {
2793 module_put(pers->owner);
2794 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
2795 mdname(mddev), level);
2799 /* ->takeover must set new_* and/or delta_disks
2800 * if it succeeds, and may set them when it fails.
2802 priv = pers->takeover(mddev);
2804 mddev->new_level = mddev->level;
2805 mddev->new_layout = mddev->layout;
2806 mddev->new_chunk_sectors = mddev->chunk_sectors;
2807 mddev->raid_disks -= mddev->delta_disks;
2808 mddev->delta_disks = 0;
2809 module_put(pers->owner);
2810 printk(KERN_WARNING "md: %s: %s would not accept array\n",
2811 mdname(mddev), level);
2812 return PTR_ERR(priv);
2815 /* Looks like we have a winner */
2816 mddev_suspend(mddev);
2817 mddev->pers->stop(mddev);
2818 module_put(mddev->pers->owner);
2819 /* Invalidate devices that are now superfluous */
2820 list_for_each_entry(rdev, &mddev->disks, same_set)
2821 if (rdev->raid_disk >= mddev->raid_disks) {
2822 rdev->raid_disk = -1;
2823 clear_bit(In_sync, &rdev->flags);
2826 mddev->private = priv;
2827 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
2828 mddev->level = mddev->new_level;
2829 mddev->layout = mddev->new_layout;
2830 mddev->chunk_sectors = mddev->new_chunk_sectors;
2831 mddev->delta_disks = 0;
2833 mddev_resume(mddev);
2834 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2835 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2836 md_wakeup_thread(mddev->thread);
2840 static struct md_sysfs_entry md_level =
2841 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
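/*
 * Illustrative usage (a sketch; this only succeeds when the target
 * personality implements ->takeover for the array's current shape):
 *
 *   echo raid5 > /sys/block/md0/md/level
 *
 * On an inactive array this merely records the requested level; on a
 * running array it attempts the full takeover sequence above.
 */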
2845 layout_show(mddev_t *mddev, char *page)
2847 /* just a number, not meaningful for all levels */
2848 if (mddev->reshape_position != MaxSector &&
2849 mddev->layout != mddev->new_layout)
2850 return sprintf(page, "%d (%d)\n",
2851 mddev->new_layout, mddev->layout);
2852 return sprintf(page, "%d\n", mddev->layout);
2856 layout_store(mddev_t *mddev, const char *buf, size_t len)
2859 unsigned long n = simple_strtoul(buf, &e, 10);
2861 if (!*buf || (*e && *e != '\n'))
2866 if (mddev->pers->check_reshape == NULL)
2868 mddev->new_layout = n;
2869 err = mddev->pers->check_reshape(mddev);
2871 mddev->new_layout = mddev->layout;
2875 mddev->new_layout = n;
2876 if (mddev->reshape_position == MaxSector)
2881 static struct md_sysfs_entry md_layout =
2882 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
2886 raid_disks_show(mddev_t *mddev, char *page)
2888 if (mddev->raid_disks == 0)
2890 if (mddev->reshape_position != MaxSector &&
2891 mddev->delta_disks != 0)
2892 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
2893 mddev->raid_disks - mddev->delta_disks);
2894 return sprintf(page, "%d\n", mddev->raid_disks);
2897 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2900 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2904 unsigned long n = simple_strtoul(buf, &e, 10);
2906 if (!*buf || (*e && *e != '\n'))
2910 rv = update_raid_disks(mddev, n);
2911 else if (mddev->reshape_position != MaxSector) {
2912 int olddisks = mddev->raid_disks - mddev->delta_disks;
2913 mddev->delta_disks = n - olddisks;
2914 mddev->raid_disks = n;
2916 mddev->raid_disks = n;
2917 return rv ? rv : len;
2919 static struct md_sysfs_entry md_raid_disks =
2920 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2923 chunk_size_show(mddev_t *mddev, char *page)
2925 if (mddev->reshape_position != MaxSector &&
2926 mddev->chunk_sectors != mddev->new_chunk_sectors)
2927 return sprintf(page, "%d (%d)\n",
2928 mddev->new_chunk_sectors << 9,
2929 mddev->chunk_sectors << 9);
2930 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
2934 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2937 unsigned long n = simple_strtoul(buf, &e, 10);
2939 if (!*buf || (*e && *e != '\n'))
2944 if (mddev->pers->check_reshape == NULL)
2946 mddev->new_chunk_sectors = n >> 9;
2947 err = mddev->pers->check_reshape(mddev);
2949 mddev->new_chunk_sectors = mddev->chunk_sectors;
2953 mddev->new_chunk_sectors = n >> 9;
2954 if (mddev->reshape_position == MaxSector)
2955 mddev->chunk_sectors = n >> 9;
2959 static struct md_sysfs_entry md_chunk_size =
2960 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
2963 resync_start_show(mddev_t *mddev, char *page)
2965 if (mddev->recovery_cp == MaxSector)
2966 return sprintf(page, "none\n");
2967 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2971 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2974 unsigned long long n = simple_strtoull(buf, &e, 10);
2978 if (!*buf || (*e && *e != '\n'))
2981 mddev->recovery_cp = n;
2984 static struct md_sysfs_entry md_resync_start =
2985 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2988 * The array state can be:
2990 * clear
2991 *     No devices, no size, no level
2992 *     Equivalent to STOP_ARRAY ioctl
2993 * inactive
2994 *     May have some settings, but array is not active
2995 *     all IO results in error
2996 *     When written, doesn't tear down array, but just stops it
2997 * suspended (not supported yet)
2998 *     All IO requests will block. The array can be reconfigured.
2999 *     Writing this, if accepted, will block until array is quiescent
3000 * readonly
3001 *     no resync can happen. no superblocks get written.
3002 *     write requests fail
3003 * read-auto
3004 *     like readonly, but behaves like 'clean' on a write request.
3006 * clean - no pending writes, but otherwise active.
3007 *     When written to inactive array, starts without resync
3008 *     If a write request arrives then
3009 *         if metadata is known, mark 'dirty' and switch to 'active'.
3010 *         if not known, block and switch to write-pending
3011 *     If written to an active array that has pending writes, then fails.
3012 * active
3013 *     fully active: IO and resync can be happening.
3014 *     When written to inactive array, starts with resync
3016 * write-pending
3017 *     clean, but writes are blocked waiting for 'active' to be written.
3019 * active-idle
3020 *     like active, but no writes have been seen for a while (100msec).
3023 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3024 write_pending, active_idle, bad_word};
3025 static char *array_states[] = {
3026 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3027 "write-pending", "active-idle", NULL };
3029 static int match_word(const char *word, char **list)
3032 for (n=0; list[n]; n++)
3033 if (cmd_match(word, list[n]))
3039 array_state_show(mddev_t *mddev, char *page)
3041 enum array_state st = inactive;
3054 else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
3056 else if (mddev->safemode)
3062 if (list_empty(&mddev->disks) &&
3063 mddev->raid_disks == 0 &&
3064 mddev->dev_sectors == 0)
3069 return sprintf(page, "%s\n", array_states[st]);
3072 static int do_md_stop(mddev_t * mddev, int ro, int is_open);
3073 static int do_md_run(mddev_t * mddev);
3074 static int restart_array(mddev_t *mddev);
3077 array_state_store(mddev_t *mddev, const char *buf, size_t len)
3080 enum array_state st = match_word(buf, array_states);
3085 /* stopping an active array */
3086 if (atomic_read(&mddev->openers) > 0)
3088 err = do_md_stop(mddev, 0, 0);
3091 /* stopping an active array */
3093 if (atomic_read(&mddev->openers) > 0)
3095 err = do_md_stop(mddev, 2, 0);
3097 err = 0; /* already inactive */
3100 break; /* not supported yet */
3103 err = do_md_stop(mddev, 1, 0);
3106 set_disk_ro(mddev->gendisk, 1);
3107 err = do_md_run(mddev);
3113 err = do_md_stop(mddev, 1, 0);
3114 else if (mddev->ro == 1)
3115 err = restart_array(mddev);
3118 set_disk_ro(mddev->gendisk, 0);
3122 err = do_md_run(mddev);
3127 restart_array(mddev);
3128 spin_lock_irq(&mddev->write_lock);
3129 if (atomic_read(&mddev->writes_pending) == 0) {
3130 if (mddev->in_sync == 0) {
3132 if (mddev->safemode == 1)
3133 mddev->safemode = 0;
3134 if (mddev->persistent)
3135 set_bit(MD_CHANGE_CLEAN,
3141 spin_unlock_irq(&mddev->write_lock);
3147 restart_array(mddev);
3148 if (mddev->external)
3149 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
3150 wake_up(&mddev->sb_wait);
3154 set_disk_ro(mddev->gendisk, 0);
3155 err = do_md_run(mddev);
3160 /* these cannot be set */
3166 sysfs_notify_dirent(mddev->sysfs_state);
3170 static struct md_sysfs_entry md_array_state =
3171 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
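/*
 * Illustrative usage (hypothetical names):
 *
 *   cat /sys/block/md0/md/array_state        # e.g. "clean"
 *   echo readonly > /sys/block/md0/md/array_state
 *
 * "clear" and "inactive" are refused with EBUSY while the device is
 * still held open, as checked above.
 */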
3174 null_show(mddev_t *mddev, char *page)
3180 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
3182 /* buf must be %d:%d\n? giving major and minor numbers */
3183 /* The new device is added to the array.
3184 * If the array has a persistent superblock, we read the
3185 * superblock to initialise info and check validity.
3186 * Otherwise, only checking done is that in bind_rdev_to_array,
3187 * which mainly checks size.
3190 int major = simple_strtoul(buf, &e, 10);
3196 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
3198 minor = simple_strtoul(e+1, &e, 10);
3199 if (*e && *e != '\n')
3201 dev = MKDEV(major, minor);
3202 if (major != MAJOR(dev) ||
3203 minor != MINOR(dev))
3207 if (mddev->persistent) {
3208 rdev = md_import_device(dev, mddev->major_version,
3209 mddev->minor_version);
3210 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
3211 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3212 mdk_rdev_t, same_set);
3213 err = super_types[mddev->major_version]
3214 .load_super(rdev, rdev0, mddev->minor_version);
3218 } else if (mddev->external)
3219 rdev = md_import_device(dev, -2, -1);
3221 rdev = md_import_device(dev, -1, -1);
3224 return PTR_ERR(rdev);
3225 err = bind_rdev_to_array(rdev, mddev);
3229 return err ? err : len;
3232 static struct md_sysfs_entry md_new_device =
3233 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
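/*
 * Illustrative usage (hypothetical numbers): add the device with
 * major 8, minor 17 (conventionally /dev/sdb1) to the array:
 *
 *   echo 8:17 > /sys/block/md0/md/new_dev
 */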
3236 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
3239 unsigned long chunk, end_chunk;
3243 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
3245 chunk = end_chunk = simple_strtoul(buf, &end, 0);
3246 if (buf == end) break;
3247 if (*end == '-') { /* range */
3249 end_chunk = simple_strtoul(buf, &end, 0);
3250 if (buf == end) break;
3252 if (*end && !isspace(*end)) break;
3253 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
3255 while (isspace(*buf)) buf++;
3257 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
3262 static struct md_sysfs_entry md_bitmap =
3263 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
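/*
 * Illustrative usage: mark bitmap chunks 100 through 200 dirty so
 * they are rewritten on the next resync, using the range form
 * documented above:
 *
 *   echo "100-200" > /sys/block/md0/md/bitmap_set_bits
 */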
3266 size_show(mddev_t *mddev, char *page)
3268 return sprintf(page, "%llu\n",
3269 (unsigned long long)mddev->dev_sectors / 2);
3272 static int update_size(mddev_t *mddev, sector_t num_sectors);
3275 size_store(mddev_t *mddev, const char *buf, size_t len)
3277 /* If array is inactive, we can reduce the component size, but
3278 * not increase it (except from 0).
3279 * If array is active, we can try an on-line resize
3282 int err = strict_blocks_to_sectors(buf, &sectors);
3287 err = update_size(mddev, sectors);
3288 md_update_sb(mddev, 1);
3290 if (mddev->dev_sectors == 0 ||
3291 mddev->dev_sectors > sectors)
3292 mddev->dev_sectors = sectors;
3296 return err ? err : len;
3299 static struct md_sysfs_entry md_size =
3300 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
3303 * Metadata version: this is one of
3305 *   'none' for arrays with no metadata (good luck...)
3306 *   'external' for arrays with externally managed metadata,
3307 *   or N.M for internally known formats
3310 metadata_show(mddev_t *mddev, char *page)
3312 if (mddev->persistent)
3313 return sprintf(page, "%d.%d\n",
3314 mddev->major_version, mddev->minor_version);
3315 else if (mddev->external)
3316 return sprintf(page, "external:%s\n", mddev->metadata_type);
3318 return sprintf(page, "none\n");
3322 metadata_store(mddev_t *mddev, const char *buf, size_t len)
3326 /* Changing the details of 'external' metadata is
3327 * always permitted. Otherwise there must be
3328 * no devices attached to the array.
3330 if (mddev->external && strncmp(buf, "external:", 9) == 0)
3332 else if (!list_empty(&mddev->disks))
3335 if (cmd_match(buf, "none")) {
3336 mddev->persistent = 0;
3337 mddev->external = 0;
3338 mddev->major_version = 0;
3339 mddev->minor_version = 90;
3342 if (strncmp(buf, "external:", 9) == 0) {
3343 size_t namelen = len-9;
3344 if (namelen >= sizeof(mddev->metadata_type))
3345 namelen = sizeof(mddev->metadata_type)-1;
3346 strncpy(mddev->metadata_type, buf+9, namelen);
3347 mddev->metadata_type[namelen] = 0;
3348 if (namelen && mddev->metadata_type[namelen-1] == '\n')
3349 mddev->metadata_type[--namelen] = 0;
3350 mddev->persistent = 0;
3351 mddev->external = 1;
3352 mddev->major_version = 0;
3353 mddev->minor_version = 90;
3356 major = simple_strtoul(buf, &e, 10);
3357 if (e==buf || *e != '.')
3360 minor = simple_strtoul(buf, &e, 10);
3361 if (e==buf || (*e && *e != '\n') )
3363 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
3365 mddev->major_version = major;
3366 mddev->minor_version = minor;
3367 mddev->persistent = 1;
3368 mddev->external = 0;
3372 static struct md_sysfs_entry md_metadata =
3373 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
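/*
 * Typical values seen in this file (illustrative): "0.90", "1.2",
 * "external:imsm", or "none". Per the rules above, only the name
 * after "external:" may be changed while devices are attached.
 */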
3376 action_show(mddev_t *mddev, char *page)
3378 char *type = "idle";
3379 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3381 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3382 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3383 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3385 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3386 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3388 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
3392 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3395 return sprintf(page, "%s\n", type);
3399 action_store(mddev_t *mddev, const char *page, size_t len)
3401 if (!mddev->pers || !mddev->pers->sync_request)
3404 if (cmd_match(page, "frozen"))
3405 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3407 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3409 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
3410 if (mddev->sync_thread) {
3411 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3412 md_unregister_thread(mddev->sync_thread);
3413 mddev->sync_thread = NULL;
3414 mddev->recovery = 0;
3416 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3417 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3419 else if (cmd_match(page, "resync"))
3420 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3421 else if (cmd_match(page, "recover")) {
3422 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3423 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3424 } else if (cmd_match(page, "reshape")) {
3426 if (mddev->pers->start_reshape == NULL)
3428 err = mddev->pers->start_reshape(mddev);
3431 sysfs_notify(&mddev->kobj, NULL, "degraded");
3433 if (cmd_match(page, "check"))
3434 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3435 else if (!cmd_match(page, "repair"))
3437 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3438 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3440 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3441 md_wakeup_thread(mddev->thread);
3442 sysfs_notify_dirent(mddev->sysfs_action);
3447 mismatch_cnt_show(mddev_t *mddev, char *page)
3449 return sprintf(page, "%llu\n",
3450 (unsigned long long) mddev->resync_mismatches);
3453 static struct md_sysfs_entry md_scan_mode =
3454 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
3457 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
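/*
 * Illustrative usage: run a read-only consistency check, then read
 * the mismatch count afterwards:
 *
 *   echo check > /sys/block/md0/md/sync_action
 *   cat /sys/block/md0/md/mismatch_cnt
 */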
3460 sync_min_show(mddev_t *mddev, char *page)
3462 return sprintf(page, "%d (%s)\n", speed_min(mddev),
3463 mddev->sync_speed_min ? "local": "system");
3467 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
3471 if (strncmp(buf, "system", 6)==0) {
3472 mddev->sync_speed_min = 0;
3475 min = simple_strtoul(buf, &e, 10);
3476 if (buf == e || (*e && *e != '\n') || min <= 0)
3478 mddev->sync_speed_min = min;
3482 static struct md_sysfs_entry md_sync_min =
3483 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
3486 sync_max_show(mddev_t *mddev, char *page)
3488 return sprintf(page, "%d (%s)\n", speed_max(mddev),
3489 mddev->sync_speed_max ? "local": "system");
3493 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
3497 if (strncmp(buf, "system", 6)==0) {
3498 mddev->sync_speed_max = 0;
3501 max = simple_strtoul(buf, &e, 10);
3502 if (buf == e || (*e && *e != '\n') || max <= 0)
3504 mddev->sync_speed_max = max;
3508 static struct md_sysfs_entry md_sync_max =
3509 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
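/*
 * Illustrative usage: cap resync throughput for this array at
 * 50000 KB/sec, then revert to the system-wide sysctl limit:
 *
 *   echo 50000  > /sys/block/md0/md/sync_speed_max
 *   echo system > /sys/block/md0/md/sync_speed_max
 */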
3512 degraded_show(mddev_t *mddev, char *page)
3514 return sprintf(page, "%d\n", mddev->degraded);
3516 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
3519 sync_force_parallel_show(mddev_t *mddev, char *page)
3521 return sprintf(page, "%d\n", mddev->parallel_resync);
3525 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
3529 if (strict_strtol(buf, 10, &n))
3532 if (n != 0 && n != 1)
3535 mddev->parallel_resync = n;
3537 if (mddev->sync_thread)
3538 wake_up(&resync_wait);
3543 /* force parallel resync, even with shared block devices */
3544 static struct md_sysfs_entry md_sync_force_parallel =
3545 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
3546 sync_force_parallel_show, sync_force_parallel_store);
3549 sync_speed_show(mddev_t *mddev, char *page)
3551 unsigned long resync, dt, db;
3552 if (mddev->curr_resync == 0)
3553 return sprintf(page, "none\n");
3554 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
3555 dt = (jiffies - mddev->resync_mark) / HZ;
3557 db = resync - mddev->resync_mark_cnt;
3558 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
3561 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
3564 sync_completed_show(mddev_t *mddev, char *page)
3566 unsigned long max_sectors, resync;
3568 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3569 return sprintf(page, "none\n");
3571 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3572 max_sectors = mddev->resync_max_sectors;
3574 max_sectors = mddev->dev_sectors;
3576 resync = mddev->curr_resync_completed;
3577 return sprintf(page, "%lu / %lu\n", resync, max_sectors);
3580 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3583 min_sync_show(mddev_t *mddev, char *page)
3585 return sprintf(page, "%llu\n",
3586 (unsigned long long)mddev->resync_min);
3589 min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3591 unsigned long long min;
3592 if (strict_strtoull(buf, 10, &min))
3594 if (min > mddev->resync_max)
3596 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3599 /* Must be a multiple of chunk_size */
3600 if (mddev->chunk_sectors) {
3601 sector_t temp = min;
3602 if (sector_div(temp, mddev->chunk_sectors))
3605 mddev->resync_min = min;
3610 static struct md_sysfs_entry md_min_sync =
3611 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
3614 max_sync_show(mddev_t *mddev, char *page)
3616 if (mddev->resync_max == MaxSector)
3617 return sprintf(page, "max\n");
3619 return sprintf(page, "%llu\n",
3620 (unsigned long long)mddev->resync_max);
3623 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3625 if (strncmp(buf, "max", 3) == 0)
3626 mddev->resync_max = MaxSector;
3628 unsigned long long max;
3629 if (strict_strtoull(buf, 10, &max))
3631 if (max < mddev->resync_min)
3633 if (max < mddev->resync_max &&
3635 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3638 /* Must be a multiple of chunk_size */
3639 if (mddev->chunk_sectors) {
3640 sector_t temp = max;
3641 if (sector_div(temp, mddev->chunk_sectors))
3644 mddev->resync_max = max;
3646 wake_up(&mddev->recovery_wait);
3650 static struct md_sysfs_entry md_max_sync =
3651 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
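/*
 * Illustrative usage: scrub only the first 2000000 sectors (both
 * bounds must be multiples of the chunk size, as enforced above;
 * 2000000 divides evenly by the 128-sector chunks of a 64K-chunk
 * array):
 *
 *   echo 0       > /sys/block/md0/md/sync_min
 *   echo 2000000 > /sys/block/md0/md/sync_max
 *   echo check   > /sys/block/md0/md/sync_action
 */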
3654 suspend_lo_show(mddev_t *mddev, char *page)
3656 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
3660 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3663 unsigned long long new = simple_strtoull(buf, &e, 10);
3665 if (mddev->pers == NULL ||
3666 mddev->pers->quiesce == NULL)
3668 if (buf == e || (*e && *e != '\n'))
3670 if (new >= mddev->suspend_hi ||
3671 (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
3672 mddev->suspend_lo = new;
3673 mddev->pers->quiesce(mddev, 2);
3678 static struct md_sysfs_entry md_suspend_lo =
3679 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
3683 suspend_hi_show(mddev_t *mddev, char *page)
3685 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3689 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3692 unsigned long long new = simple_strtoull(buf, &e, 10);
3694 if (mddev->pers == NULL ||
3695 mddev->pers->quiesce == NULL)
3697 if (buf == e || (*e && *e != '\n'))
3699 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
3700 (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
3701 mddev->suspend_hi = new;
3702 mddev->pers->quiesce(mddev, 1);
3703 mddev->pers->quiesce(mddev, 0);
3708 static struct md_sysfs_entry md_suspend_hi =
3709 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
3712 reshape_position_show(mddev_t *mddev, char *page)
3714 if (mddev->reshape_position != MaxSector)
3715 return sprintf(page, "%llu\n",
3716 (unsigned long long)mddev->reshape_position);
3717 strcpy(page, "none\n");
3722 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
3725 unsigned long long new = simple_strtoull(buf, &e, 10);
3728 if (buf == e || (*e && *e != '\n'))
3730 mddev->reshape_position = new;
3731 mddev->delta_disks = 0;
3732 mddev->new_level = mddev->level;
3733 mddev->new_layout = mddev->layout;
3734 mddev->new_chunk_sectors = mddev->chunk_sectors;
3738 static struct md_sysfs_entry md_reshape_position =
3739 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
3740 reshape_position_store);
3743 array_size_show(mddev_t *mddev, char *page)
3745 if (mddev->external_size)
3746 return sprintf(page, "%llu\n",
3747 (unsigned long long)mddev->array_sectors/2);
3749 return sprintf(page, "default\n");
3753 array_size_store(mddev_t *mddev, const char *buf, size_t len)
3757 if (strncmp(buf, "default", 7) == 0) {
3759 sectors = mddev->pers->size(mddev, 0, 0);
3761 sectors = mddev->array_sectors;
3763 mddev->external_size = 0;
3765 if (strict_blocks_to_sectors(buf, &sectors) < 0)
3767 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
3770 mddev->external_size = 1;
3773 mddev->array_sectors = sectors;
3774 set_capacity(mddev->gendisk, mddev->array_sectors);
3776 revalidate_disk(mddev->gendisk);
3781 static struct md_sysfs_entry md_array_size =
3782 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, array_size_store);
3785 static struct attribute *md_default_attrs[] = {
3788 &md_raid_disks.attr,
3789 &md_chunk_size.attr,
3791 &md_resync_start.attr,
3793 &md_new_device.attr,
3794 &md_safe_delay.attr,
3795 &md_array_state.attr,
3796 &md_reshape_position.attr,
3797 &md_array_size.attr,
3801 static struct attribute *md_redundancy_attrs[] = {
3803 &md_mismatches.attr,
3806 &md_sync_speed.attr,
3807 &md_sync_force_parallel.attr,
3808 &md_sync_completed.attr,
3811 &md_suspend_lo.attr,
3812 &md_suspend_hi.attr,
3817 static struct attribute_group md_redundancy_group = {
3819 .attrs = md_redundancy_attrs,
3824 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3826 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3827 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3832 rv = mddev_lock(mddev);
3834 rv = entry->show(mddev, page);
3835 mddev_unlock(mddev);
3841 md_attr_store(struct kobject *kobj, struct attribute *attr,
3842 const char *page, size_t length)
3844 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3845 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3850 if (!capable(CAP_SYS_ADMIN))
3852 rv = mddev_lock(mddev);
3853 if (mddev->hold_active == UNTIL_IOCTL)
3854 mddev->hold_active = 0;
3856 rv = entry->store(mddev, page, length);
3857 mddev_unlock(mddev);
3862 static void md_free(struct kobject *ko)
3864 mddev_t *mddev = container_of(ko, mddev_t, kobj);
3866 if (mddev->sysfs_state)
3867 sysfs_put(mddev->sysfs_state);
3869 if (mddev->gendisk) {
3870 del_gendisk(mddev->gendisk);
3871 put_disk(mddev->gendisk);
3874 blk_cleanup_queue(mddev->queue);
3879 static struct sysfs_ops md_sysfs_ops = {
3880 .show = md_attr_show,
3881 .store = md_attr_store,
3883 static struct kobj_type md_ktype = {
3885 .sysfs_ops = &md_sysfs_ops,
3886 .default_attrs = md_default_attrs,
3891 static void mddev_delayed_delete(struct work_struct *ws)
3893 mddev_t *mddev = container_of(ws, mddev_t, del_work);
3895 if (mddev->private == &md_redundancy_group) {
3896 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3897 if (mddev->sysfs_action)
3898 sysfs_put(mddev->sysfs_action);
3899 mddev->sysfs_action = NULL;
3900 mddev->private = NULL;
3902 kobject_del(&mddev->kobj);
3903 kobject_put(&mddev->kobj);
3906 static int md_alloc(dev_t dev, char *name)
3908 static DEFINE_MUTEX(disks_mutex);
3909 mddev_t *mddev = mddev_find(dev);
3910 struct gendisk *disk;
3919 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
3920 shift = partitioned ? MdpMinorShift : 0;
3921 unit = MINOR(mddev->unit) >> shift;
3923 /* wait for any previous instance of this device
3924 * to be completely removed (mddev_delayed_delete).
3926 flush_scheduled_work();
3928 mutex_lock(&disks_mutex);
3934 /* Need to ensure that 'name' is not a duplicate.
3937 spin_lock(&all_mddevs_lock);
3939 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
3940 if (mddev2->gendisk &&
3941 strcmp(mddev2->gendisk->disk_name, name) == 0) {
3942 spin_unlock(&all_mddevs_lock);
3945 spin_unlock(&all_mddevs_lock);
3949 mddev->queue = blk_alloc_queue(GFP_KERNEL);
3952 mddev->queue->queuedata = mddev;
3954 /* Can be unlocked because the queue is new: no concurrency */
3955 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
3957 blk_queue_make_request(mddev->queue, md_make_request);
3959 disk = alloc_disk(1 << shift);
3961 blk_cleanup_queue(mddev->queue);
3962 mddev->queue = NULL;
3965 disk->major = MAJOR(mddev->unit);
3966 disk->first_minor = unit << shift;
3968 strcpy(disk->disk_name, name);
3969 else if (partitioned)
3970 sprintf(disk->disk_name, "md_d%d", unit);
3972 sprintf(disk->disk_name, "md%d", unit);
3973 disk->fops = &md_fops;
3974 disk->private_data = mddev;
3975 disk->queue = mddev->queue;
3976 /* Allow extended partitions. This makes the
3977 * 'mdp' device redundant, but we can't really remove it now. */
3980 disk->flags |= GENHD_FL_EXT_DEVT;
3982 mddev->gendisk = disk;
3983 error = kobject_init_and_add(&mddev->kobj, &md_ktype,
3984 &disk_to_dev(disk)->kobj, "%s", "md");
3986 /* This isn't possible, but as kobject_init_and_add is marked
3987 * __must_check, we must do something with the result
3989 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
3994 mutex_unlock(&disks_mutex);
3996 kobject_uevent(&mddev->kobj, KOBJ_ADD);
3997 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");
4003 static struct kobject *md_probe(dev_t dev, int *part, void *data)
4005 md_alloc(dev, NULL);
4009 static int add_named_array(const char *val, struct kernel_param *kp)
4011 /* val must be "md_*" where * is not all digits.
4012 * We allocate an array with a large free minor number, and
4013 * set the name to val. val must not already be an active name.
4015 int len = strlen(val);
4016 char buf[DISK_NAME_LEN];
4018 while (len && val[len-1] == '\n')
4020 if (len >= DISK_NAME_LEN)
4022 strlcpy(buf, val, len+1);
4023 if (strncmp(buf, "md_", 3) != 0)
4025 return md_alloc(0, buf);
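/*
 * Illustrative usage, assuming this handler is wired up as the
 * "new_array" module parameter (the registration is not shown in
 * this excerpt):
 *
 *   echo md_home > /sys/module/md_mod/parameters/new_array
 *
 * creates an array named md_home with a large free minor number.
 */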
4028 static void md_safemode_timeout(unsigned long data)
4030 mddev_t *mddev = (mddev_t *) data;
4032 if (!atomic_read(&mddev->writes_pending)) {
4033 mddev->safemode = 1;
4034 if (mddev->external)
4035 sysfs_notify_dirent(mddev->sysfs_state);
4037 md_wakeup_thread(mddev->thread);
4040 static int start_dirty_degraded;
4042 static int do_md_run(mddev_t * mddev)
4046 struct gendisk *disk;
4047 struct mdk_personality *pers;
4049 if (list_empty(&mddev->disks))
4050 /* cannot run an array with no devices.. */
4057 * Analyze all RAID superblock(s)
4059 if (!mddev->raid_disks) {
4060 if (!mddev->persistent)
4065 if (mddev->level != LEVEL_NONE)
4066 request_module("md-level-%d", mddev->level);
4067 else if (mddev->clevel[0])
4068 request_module("md-%s", mddev->clevel);
4071 * Drop all container device buffers, from now on
4072 * the only valid external interface is through the md
4075 list_for_each_entry(rdev, &mddev->disks, same_set) {
4076 if (test_bit(Faulty, &rdev->flags))
4078 sync_blockdev(rdev->bdev);
4079 invalidate_bdev(rdev->bdev);
4081 /* perform some consistency tests on the device.
4082 * We don't want the data to overlap the metadata;
4083 * internal bitmap issues have been handled elsewhere.
4085 if (rdev->data_offset < rdev->sb_start) {
4086 if (mddev->dev_sectors &&
4087 rdev->data_offset + mddev->dev_sectors
4089 printk("md: %s: data overlaps metadata\n",
4094 if (rdev->sb_start + rdev->sb_size/512
4095 > rdev->data_offset) {
4096 printk("md: %s: metadata overlaps data\n",
4101 sysfs_notify_dirent(rdev->sysfs_state);
4104 md_probe(mddev->unit, NULL, NULL);
4105 disk = mddev->gendisk;
4109 spin_lock(&pers_lock);
4110 pers = find_pers(mddev->level, mddev->clevel);
4111 if (!pers || !try_module_get(pers->owner)) {
4112 spin_unlock(&pers_lock);
4113 if (mddev->level != LEVEL_NONE)
4114 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
4117 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
4122 spin_unlock(&pers_lock);
4123 if (mddev->level != pers->level) {
4124 mddev->level = pers->level;
4125 mddev->new_level = pers->level;
4127 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4129 if (mddev->reshape_position != MaxSector &&
4130 pers->start_reshape == NULL) {
4131 /* This personality cannot handle reshaping... */
4133 module_put(pers->owner);
4137 if (pers->sync_request) {
4138 /* Warn if this is a potentially silly configuration. */
4141 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4145 list_for_each_entry(rdev, &mddev->disks, same_set)
4146 list_for_each_entry(rdev2, &mddev->disks, same_set) {
4148 rdev->bdev->bd_contains ==
4149 rdev2->bdev->bd_contains) {
4151 "%s: WARNING: %s appears to be"
4152 " on the same physical disk as"
4155 bdevname(rdev->bdev,b),
4156 bdevname(rdev2->bdev,b2));
4163 "True protection against single-disk"
4164 " failure might be compromised.\n");
4167 mddev->recovery = 0;
4168 /* may be over-ridden by personality */
4169 mddev->resync_max_sectors = mddev->dev_sectors;
4171 mddev->barriers_work = 1;
4172 mddev->ok_start_degraded = start_dirty_degraded;
4175 mddev->ro = 2; /* read-only, but switch on first write */
4177 err = mddev->pers->run(mddev);
4179 printk(KERN_ERR "md: pers->run() failed ...\n");
4180 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
4181 WARN_ONCE(!mddev->external_size, "%s: default size too small,"
4182 " but 'external_size' not in effect?\n", __func__);
4184 "md: invalid array_size %llu > default size %llu\n",
4185 (unsigned long long)mddev->array_sectors / 2,
4186 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
4188 mddev->pers->stop(mddev);
4190 if (err == 0 && mddev->pers->sync_request) {
4191 err = bitmap_create(mddev);
4193 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
4194 mdname(mddev), err);
4195 mddev->pers->stop(mddev);
4199 module_put(mddev->pers->owner);
4201 bitmap_destroy(mddev);
4204 if (mddev->pers->sync_request) {
4205 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4207 "md: cannot register extra attributes for %s\n",
4209 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
4210 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
4213 atomic_set(&mddev->writes_pending,0);
4214 mddev->safemode = 0;
4215 mddev->safemode_timer.function = md_safemode_timeout;
4216 mddev->safemode_timer.data = (unsigned long) mddev;
4217 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
4220 list_for_each_entry(rdev, &mddev->disks, same_set)
4221 if (rdev->raid_disk >= 0) {
4223 sprintf(nm, "rd%d", rdev->raid_disk);
4224 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
4225 printk("md: cannot register %s for %s\n",
4229 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4232 md_update_sb(mddev, 0);
4234 set_capacity(disk, mddev->array_sectors);
4236 /* If there is a partially-recovered drive we need to
4237 * start recovery here. If we leave it to md_check_recovery,
4238 * it will remove the drives and not do the right thing
4240 if (mddev->degraded && !mddev->sync_thread) {
4242 list_for_each_entry(rdev, &mddev->disks, same_set)
4243 if (rdev->raid_disk >= 0 &&
4244 !test_bit(In_sync, &rdev->flags) &&
4245 !test_bit(Faulty, &rdev->flags))
4246 /* complete an interrupted recovery */
4248 if (spares && mddev->pers->sync_request) {
4249 mddev->recovery = 0;
4250 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4251 mddev->sync_thread = md_register_thread(md_do_sync,
4254 if (!mddev->sync_thread) {
4255 printk(KERN_ERR "%s: could not start resync"
4258 /* leave the spares where they are, it shouldn't hurt */
4259 mddev->recovery = 0;
4263 md_wakeup_thread(mddev->thread);
4264 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4266 revalidate_disk(mddev->gendisk);
4268 md_new_event(mddev);
4269 sysfs_notify_dirent(mddev->sysfs_state);
4270 if (mddev->sysfs_action)
4271 sysfs_notify_dirent(mddev->sysfs_action);
4272 sysfs_notify(&mddev->kobj, NULL, "degraded");
4273 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4277 static int restart_array(mddev_t *mddev)
4279 struct gendisk *disk = mddev->gendisk;
4281 /* Complain if it has no devices */
4282 if (list_empty(&mddev->disks))
4288 mddev->safemode = 0;
4290 set_disk_ro(disk, 0);
4291 printk(KERN_INFO "md: %s switched to read-write mode.\n",
4293 /* Kick recovery or resync if necessary */
4294 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4295 md_wakeup_thread(mddev->thread);
4296 md_wakeup_thread(mddev->sync_thread);
4297 sysfs_notify_dirent(mddev->sysfs_state);
4301 /* similar to deny_write_access, but accounts for our holding a reference
4302 * to the file ourselves */
4303 static int deny_bitmap_write_access(struct file * file)
4305 struct inode *inode = file->f_mapping->host;
4307 spin_lock(&inode->i_lock);
4308 if (atomic_read(&inode->i_writecount) > 1) {
4309 spin_unlock(&inode->i_lock);
4312 atomic_set(&inode->i_writecount, -1);
4313 spin_unlock(&inode->i_lock);
4318 static void restore_bitmap_write_access(struct file *file)
4320 struct inode *inode = file->f_mapping->host;
4322 spin_lock(&inode->i_lock);
4323 atomic_set(&inode->i_writecount, 1);
4324 spin_unlock(&inode->i_lock);
4327 /* mode:
4328 * 0 - completely stop and dis-assemble array
4329 * 1 - switch to readonly
4330 * 2 - stop but do not disassemble array
4332 static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4335 struct gendisk *disk = mddev->gendisk;
4338 mutex_lock(&mddev->open_mutex);
4339 if (atomic_read(&mddev->openers) > is_open) {
4340 printk("md: %s still in use.\n",mdname(mddev));
4342 } else if (mddev->pers) {
4344 if (mddev->sync_thread) {
4345 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4346 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4347 md_unregister_thread(mddev->sync_thread);
4348 mddev->sync_thread = NULL;
4351 del_timer_sync(&mddev->safemode_timer);
4354 case 1: /* readonly */
4360 case 0: /* disassemble */
4362 bitmap_flush(mddev);
4363 md_super_wait(mddev);
4365 set_disk_ro(disk, 0);
4367 mddev->pers->stop(mddev);
4368 mddev->queue->merge_bvec_fn = NULL;
4369 mddev->queue->unplug_fn = NULL;
4370 mddev->queue->backing_dev_info.congested_fn = NULL;
4371 module_put(mddev->pers->owner);
4372 if (mddev->pers->sync_request)
4373 mddev->private = &md_redundancy_group;
4375 /* tell userspace to handle 'inactive' */
4376 sysfs_notify_dirent(mddev->sysfs_state);
4378 list_for_each_entry(rdev, &mddev->disks, same_set)
4379 if (rdev->raid_disk >= 0) {
4381 sprintf(nm, "rd%d", rdev->raid_disk);
4382 sysfs_remove_link(&mddev->kobj, nm);
4385 set_capacity(disk, 0);
4391 if (!mddev->in_sync || mddev->flags) {
4392 /* mark array as shutdown cleanly */
4394 md_update_sb(mddev, 1);
4397 set_disk_ro(disk, 1);
4398 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4402 mutex_unlock(&mddev->open_mutex);
4406 * Free resources if final stop
4410 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
4412 bitmap_destroy(mddev);
4413 if (mddev->bitmap_file) {
4414 restore_bitmap_write_access(mddev->bitmap_file);
4415 fput(mddev->bitmap_file);
4416 mddev->bitmap_file = NULL;
4418 mddev->bitmap_offset = 0;
4420 /* make sure all mddev_delayed_delete calls have finished */
4421 flush_scheduled_work();
4423 export_array(mddev);
4425 mddev->array_sectors = 0;
4426 mddev->external_size = 0;
4427 mddev->dev_sectors = 0;
4428 mddev->raid_disks = 0;
4429 mddev->recovery_cp = 0;
4430 mddev->resync_min = 0;
4431 mddev->resync_max = MaxSector;
4432 mddev->reshape_position = MaxSector;
4433 mddev->external = 0;
4434 mddev->persistent = 0;
4435 mddev->level = LEVEL_NONE;
4436 mddev->clevel[0] = 0;
4439 mddev->metadata_type[0] = 0;
4440 mddev->chunk_sectors = 0;
4441 mddev->ctime = mddev->utime = 0;
4443 mddev->max_disks = 0;
4445 mddev->delta_disks = 0;
4446 mddev->new_level = LEVEL_NONE;
4447 mddev->new_layout = 0;
4448 mddev->new_chunk_sectors = 0;
4449 mddev->curr_resync = 0;
4450 mddev->resync_mismatches = 0;
4451 mddev->suspend_lo = mddev->suspend_hi = 0;
4452 mddev->sync_speed_min = mddev->sync_speed_max = 0;
4453 mddev->recovery = 0;
4456 mddev->degraded = 0;
4457 mddev->barriers_work = 0;
4458 mddev->safemode = 0;
4459 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4460 if (mddev->hold_active == UNTIL_STOP)
4461 mddev->hold_active = 0;
4463 } else if (mddev->pers)
4464 printk(KERN_INFO "md: %s switched to read-only mode.\n",
4467 blk_integrity_unregister(disk);
4468 md_new_event(mddev);
4469 sysfs_notify_dirent(mddev->sysfs_state);
4474 static void autorun_array(mddev_t *mddev)
4479 if (list_empty(&mddev->disks))
4482 printk(KERN_INFO "md: running: ");
4484 list_for_each_entry(rdev, &mddev->disks, same_set) {
4485 char b[BDEVNAME_SIZE];
4486 printk("<%s>", bdevname(rdev->bdev,b));
4490 err = do_md_run(mddev);
4492 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
4493 do_md_stop(mddev, 0, 0);
4498 * let's try to run arrays based on all disks that have arrived
4499 * until now. (those are in pending_raid_disks)
4501 * the method: pick the first pending disk, collect all disks with
4502 * the same UUID, remove all from the pending list and put them into
4503 * the 'same_array' list. Then order this list based on superblock
4504 * update time (freshest comes first), kick out 'old' disks and
4505 * compare superblocks. If everything's fine then run it.
4507 * If "unit" is allocated, then bump its reference count
4509 static void autorun_devices(int part)
4511 mdk_rdev_t *rdev0, *rdev, *tmp;
4513 char b[BDEVNAME_SIZE];
4515 printk(KERN_INFO "md: autorun ...\n");
4516 while (!list_empty(&pending_raid_disks)) {
4519 LIST_HEAD(candidates);
4520 rdev0 = list_entry(pending_raid_disks.next,
4521 mdk_rdev_t, same_set);
4523 printk(KERN_INFO "md: considering %s ...\n",
4524 bdevname(rdev0->bdev,b));
4525 INIT_LIST_HEAD(&candidates);
4526 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
4527 if (super_90_load(rdev, rdev0, 0) >= 0) {
4528 printk(KERN_INFO "md: adding %s ...\n",
4529 bdevname(rdev->bdev,b));
4530 list_move(&rdev->same_set, &candidates);
4533 * now we have a set of devices, with all of them having
4534 * mostly sane superblocks. It's time to allocate the mddev.
4538 dev = MKDEV(mdp_major,
4539 rdev0->preferred_minor << MdpMinorShift);
4540 unit = MINOR(dev) >> MdpMinorShift;
4542 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
4545 if (rdev0->preferred_minor != unit) {
4546 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
4547 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
4551 md_probe(dev, NULL, NULL);
4552 mddev = mddev_find(dev);
4553 if (!mddev || !mddev->gendisk) {
4557 "md: cannot allocate memory for md drive.\n");
4560 if (mddev_lock(mddev))
4561 printk(KERN_WARNING "md: %s locked, cannot run\n",
4563 else if (mddev->raid_disks || mddev->major_version
4564 || !list_empty(&mddev->disks)) {
4566 "md: %s already running, cannot run %s\n",
4567 mdname(mddev), bdevname(rdev0->bdev,b));
4568 mddev_unlock(mddev);
4570 printk(KERN_INFO "md: created %s\n", mdname(mddev));
4571 mddev->persistent = 1;
4572 rdev_for_each_list(rdev, tmp, &candidates) {
4573 list_del_init(&rdev->same_set);
4574 if (bind_rdev_to_array(rdev, mddev))
4577 autorun_array(mddev);
4578 mddev_unlock(mddev);
4580 /* on success, candidates will be empty; on error it won't be. */
4583 rdev_for_each_list(rdev, tmp, &candidates) {
4584 list_del_init(&rdev->same_set);
4589 printk(KERN_INFO "md: ... autorun DONE.\n");
4591 #endif /* !MODULE */
4593 static int get_version(void __user * arg)
4597 ver.major = MD_MAJOR_VERSION;
4598 ver.minor = MD_MINOR_VERSION;
4599 ver.patchlevel = MD_PATCHLEVEL_VERSION;
4601 if (copy_to_user(arg, &ver, sizeof(ver)))
4607 static int get_array_info(mddev_t * mddev, void __user * arg)
4609 mdu_array_info_t info;
4610 int nr,working,insync,failed,spare;
4613 nr=working=insync=failed=spare=0;
4614 list_for_each_entry(rdev, &mddev->disks, same_set) {
4616 if (test_bit(Faulty, &rdev->flags))
4620 if (test_bit(In_sync, &rdev->flags))
4627 info.major_version = mddev->major_version;
4628 info.minor_version = mddev->minor_version;
4629 info.patch_version = MD_PATCHLEVEL_VERSION;
4630 info.ctime = mddev->ctime;
4631 info.level = mddev->level;
4632 info.size = mddev->dev_sectors / 2;
4633 if (info.size != mddev->dev_sectors / 2) /* overflow */
4636 info.raid_disks = mddev->raid_disks;
4637 info.md_minor = mddev->md_minor;
4638 info.not_persistent= !mddev->persistent;
4640 info.utime = mddev->utime;
4643 info.state = (1<<MD_SB_CLEAN);
4644 if (mddev->bitmap && mddev->bitmap_offset)
4645 info.state |= (1<<MD_SB_BITMAP_PRESENT);
4646 info.active_disks = insync;
4647 info.working_disks = working;
4648 info.failed_disks = failed;
4649 info.spare_disks = spare;
4651 info.layout = mddev->layout;
4652 info.chunk_size = mddev->chunk_sectors << 9;
4654 if (copy_to_user(arg, &info, sizeof(info)))
4660 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
4662 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
4663 char *ptr, *buf = NULL;
4666 if (md_allow_write(mddev))
4667 file = kmalloc(sizeof(*file), GFP_NOIO);
4669 file = kmalloc(sizeof(*file), GFP_KERNEL);
4674 /* bitmap disabled, zero the first byte and copy out */
4675 if (!mddev->bitmap || !mddev->bitmap->file) {
4676 file->pathname[0] = '\0';
4680 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
4684 ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
4688 strcpy(file->pathname, ptr);
4692 if (copy_to_user(arg, file, sizeof(*file)))
4700 static int get_disk_info(mddev_t * mddev, void __user * arg)
4702 mdu_disk_info_t info;
4705 if (copy_from_user(&info, arg, sizeof(info)))
4708 rdev = find_rdev_nr(mddev, info.number);
4710 info.major = MAJOR(rdev->bdev->bd_dev);
4711 info.minor = MINOR(rdev->bdev->bd_dev);
4712 info.raid_disk = rdev->raid_disk;
4714 if (test_bit(Faulty, &rdev->flags))
4715 info.state |= (1<<MD_DISK_FAULTY);
4716 else if (test_bit(In_sync, &rdev->flags)) {
4717 info.state |= (1<<MD_DISK_ACTIVE);
4718 info.state |= (1<<MD_DISK_SYNC);
4720 if (test_bit(WriteMostly, &rdev->flags))
4721 info.state |= (1<<MD_DISK_WRITEMOSTLY);
4723 info.major = info.minor = 0;
4724 info.raid_disk = -1;
4725 info.state = (1<<MD_DISK_REMOVED);
4728 if (copy_to_user(arg, &info, sizeof(info)))
4734 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
4736 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4738 dev_t dev = MKDEV(info->major,info->minor);
4740 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
4743 if (!mddev->raid_disks) {
4745 /* expecting a device which has a superblock */
4746 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
4749 "md: md_import_device returned %ld\n",
4751 return PTR_ERR(rdev);
4753 if (!list_empty(&mddev->disks)) {
4754 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
4755 mdk_rdev_t, same_set);
4756 err = super_types[mddev->major_version]
4757 .load_super(rdev, rdev0, mddev->minor_version);
4760 "md: %s has different UUID to %s\n",
4761 bdevname(rdev->bdev,b),
4762 bdevname(rdev0->bdev,b2));
4767 err = bind_rdev_to_array(rdev, mddev);
4774 * add_new_disk can be used once the array is assembled
4775 * to add "hot spares". They must already have a superblock
4780 if (!mddev->pers->hot_add_disk) {
4782 "%s: personality does not support diskops!\n",
4786 if (mddev->persistent)
4787 rdev = md_import_device(dev, mddev->major_version,
4788 mddev->minor_version);
4790 rdev = md_import_device(dev, -1, -1);
4793 "md: md_import_device returned %ld\n",
4795 return PTR_ERR(rdev);
4797 /* set saved_raid_disk if appropriate */
4798 if (!mddev->persistent) {
4799 if (info->state & (1<<MD_DISK_SYNC) &&
4800 info->raid_disk < mddev->raid_disks)
4801 rdev->raid_disk = info->raid_disk;
4803 rdev->raid_disk = -1;
4805 super_types[mddev->major_version].
4806 validate_super(mddev, rdev);
4807 rdev->saved_raid_disk = rdev->raid_disk;
4809 clear_bit(In_sync, &rdev->flags); /* just to be sure */
4810 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4811 set_bit(WriteMostly, &rdev->flags);
4813 clear_bit(WriteMostly, &rdev->flags);
4815 rdev->raid_disk = -1;
4816 err = bind_rdev_to_array(rdev, mddev);
4817 if (!err && !mddev->pers->hot_remove_disk) {
4818 /* If there is hot_add_disk but no hot_remove_disk,
4819 * then added disks are for geometry changes,
4820 * and should be added immediately.
4822 super_types[mddev->major_version].
4823 validate_super(mddev, rdev);
4824 err = mddev->pers->hot_add_disk(mddev, rdev);
4826 unbind_rdev_from_array(rdev);
4831 sysfs_notify_dirent(rdev->sysfs_state);
4833 md_update_sb(mddev, 1);
4834 if (mddev->degraded)
4835 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4836 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4837 md_wakeup_thread(mddev->thread);
4841 /* otherwise, add_new_disk is only allowed
4842 * for major_version==0 superblocks
4844 if (mddev->major_version != 0) {
4845 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
4850 if (!(info->state & (1<<MD_DISK_FAULTY))) {
4852 rdev = md_import_device(dev, -1, 0);
4853 if (IS_ERR(rdev)) {
4854 printk(KERN_WARNING
4855 "md: error, md_import_device() returned %ld\n",
4856 PTR_ERR(rdev));
4857 return PTR_ERR(rdev);
4858 }
4859 rdev->desc_nr = info->number;
4860 if (info->raid_disk < mddev->raid_disks)
4861 rdev->raid_disk = info->raid_disk;
4863 rdev->raid_disk = -1;
4865 if (rdev->raid_disk < mddev->raid_disks)
4866 if (info->state & (1<<MD_DISK_SYNC))
4867 set_bit(In_sync, &rdev->flags);
4869 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4870 set_bit(WriteMostly, &rdev->flags);
4872 if (!mddev->persistent) {
4873 printk(KERN_INFO "md: nonpersistent superblock ...\n");
4874 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
4876 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
4877 rdev->sectors = rdev->sb_start;
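/*
 * Worked example (hypothetical size): for v0.90 superblocks,
 * calc_dev_sboffset() places the superblock in the last 64KiB-aligned
 * 64KiB of the device, so a 1000000-sector device gives
 * sb_start = (1000000 & ~127) - 128 = 999808 sectors.
 */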
4879 err = bind_rdev_to_array(rdev, mddev);
4889 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
4891 char b[BDEVNAME_SIZE];
4894 rdev = find_rdev(mddev, dev);
4898 if (rdev->raid_disk >= 0)
4901 kick_rdev_from_array(rdev);
4902 md_update_sb(mddev, 1);
4903 md_new_event(mddev);
4907 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
4908 bdevname(rdev->bdev,b), mdname(mddev));
4912 static int hot_add_disk(mddev_t * mddev, dev_t dev)
4914 char b[BDEVNAME_SIZE];
4921 if (mddev->major_version != 0) {
4922 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
4923 " version-0 superblocks.\n",
4927 if (!mddev->pers->hot_add_disk) {
4928 printk(KERN_WARNING
4929 "%s: personality does not support diskops!\n",
4930 mdname(mddev));
4931 return -EINVAL;
4932 }
4934 rdev = md_import_device(dev, -1, 0);
4935 if (IS_ERR(rdev)) {
4936 printk(KERN_WARNING
4937 "md: error, md_import_device() returned %ld\n",
4938 PTR_ERR(rdev));
4939 return -EINVAL;
4940 }
4942 if (mddev->persistent)
4943 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
4945 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
4947 rdev->sectors = rdev->sb_start;
4949 if (test_bit(Faulty, &rdev->flags)) {
4951 "md: can not hot-add faulty %s disk to %s!\n",
4952 bdevname(rdev->bdev,b), mdname(mddev));
4956 clear_bit(In_sync, &rdev->flags);
4958 rdev->saved_raid_disk = -1;
4959 err = bind_rdev_to_array(rdev, mddev);
4964 * The rest had better be atomic; we can have disk failures
4965 * noticed in interrupt contexts ...
4966 */
4968 rdev->raid_disk = -1;
4970 md_update_sb(mddev, 1);
4973 * Kick recovery, maybe this spare has to be added to the
4974 * array immediately.
4976 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4977 md_wakeup_thread(mddev->thread);
4978 md_new_event(mddev);
4986 static int set_bitmap_file(mddev_t *mddev, int fd)
4991 if (!mddev->pers->quiesce)
4992 return -EBUSY;
4993 if (mddev->recovery || mddev->sync_thread)
4994 return -EBUSY;
4995 /* we should be able to change the bitmap.. */
5001 return -EEXIST; /* cannot add when bitmap is present */
5002 mddev->bitmap_file = fget(fd);
5004 if (mddev->bitmap_file == NULL) {
5005 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
5010 err = deny_bitmap_write_access(mddev->bitmap_file);
5012 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
5014 fput(mddev->bitmap_file);
5015 mddev->bitmap_file = NULL;
5018 mddev->bitmap_offset = 0; /* file overrides offset */
5019 } else if (mddev->bitmap == NULL)
5020 return -ENOENT; /* cannot remove what isn't there */
5023 mddev->pers->quiesce(mddev, 1);
5024 if (fd >= 0)
5025 err = bitmap_create(mddev);
5026 if (fd < 0 || err) {
5027 bitmap_destroy(mddev);
5028 fd = -1; /* make sure to put the file */
5030 mddev->pers->quiesce(mddev, 0);
5032 if (fd < 0) {
5033 if (mddev->bitmap_file) {
5034 restore_bitmap_write_access(mddev->bitmap_file);
5035 fput(mddev->bitmap_file);
5037 mddev->bitmap_file = NULL;
5044 * set_array_info is used two different ways
5045 * The original usage is when creating a new array.
5046 * In this usage, raid_disks is > 0 and it together with
5047 * level, size, not_persistent,layout,chunksize determine the
5048 * shape of the array.
5049 * This will always create an array with a type-0.90.0 superblock.
5050 * The newer usage is when assembling an array.
5051 * In this case raid_disks will be 0, and the major_version field is
5052 * used to determine which style superblocks are to be found on the devices.
5053 * The minor and patch _version numbers are also kept in case the
5054 * super_block handler wishes to interpret them.
5055 */
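/*
 * Illustrative userspace sketch (not part of this driver; names and
 * values are hypothetical) showing the two usages described above:
 *
 *	mdu_array_info_t info;
 *	int fd = open("/dev/md0", O_RDWR);
 *
 *	memset(&info, 0, sizeof(info));		// 1: create a new array
 *	info.level = 1;
 *	info.raid_disks = 2;			// > 0 => "create" path
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 *
 *	memset(&info, 0, sizeof(info));		// 2: assemble an array
 *	info.raid_disks = 0;			// 0 => "assemble" path
 *	info.major_version = 0;			// look for 0.90 superblocks
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 */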
5056 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
5059 if (info->raid_disks == 0) {
5060 /* just setting version number for superblock loading */
5061 if (info->major_version < 0 ||
5062 info->major_version >= ARRAY_SIZE(super_types) ||
5063 super_types[info->major_version].name == NULL) {
5064 /* maybe try to auto-load a module? */
5066 "md: superblock version %d not known\n",
5067 info->major_version);
5070 mddev->major_version = info->major_version;
5071 mddev->minor_version = info->minor_version;
5072 mddev->patch_version = info->patch_version;
5073 mddev->persistent = !info->not_persistent;
5076 mddev->major_version = MD_MAJOR_VERSION;
5077 mddev->minor_version = MD_MINOR_VERSION;
5078 mddev->patch_version = MD_PATCHLEVEL_VERSION;
5079 mddev->ctime = get_seconds();
5081 mddev->level = info->level;
5082 mddev->clevel[0] = 0;
5083 mddev->dev_sectors = 2 * (sector_t)info->size;
5084 mddev->raid_disks = info->raid_disks;
5085 /* don't set md_minor, it is determined by which /dev/md* was
5086 * opened.
5087 */
5088 if (info->state & (1<<MD_SB_CLEAN))
5089 mddev->recovery_cp = MaxSector;
5091 mddev->recovery_cp = 0;
5092 mddev->persistent = ! info->not_persistent;
5093 mddev->external = 0;
5095 mddev->layout = info->layout;
5096 mddev->chunk_sectors = info->chunk_size >> 9;
5098 mddev->max_disks = MD_SB_DISKS;
5100 if (mddev->persistent)
5102 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5104 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
5105 mddev->bitmap_offset = 0;
5107 mddev->reshape_position = MaxSector;
5110 * Generate a 128 bit UUID
5112 get_random_bytes(mddev->uuid, 16);
5114 mddev->new_level = mddev->level;
5115 mddev->new_chunk_sectors = mddev->chunk_sectors;
5116 mddev->new_layout = mddev->layout;
5117 mddev->delta_disks = 0;
5122 void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
5124 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
5126 if (mddev->external_size)
5129 mddev->array_sectors = array_sectors;
5131 EXPORT_SYMBOL(md_set_array_sectors);
5133 static int update_size(mddev_t *mddev, sector_t num_sectors)
5137 int fit = (num_sectors == 0);
5139 if (mddev->pers->resize == NULL)
5141 /* The "num_sectors" is the number of sectors of each device that
5142 * is used. This can only make sense for arrays with redundancy.
5143 * linear and raid0 always use whatever space is available. We can only
5144 * consider changing this number if no resync or reconstruction is
5145 * happening, and if the new size is acceptable. It must fit before the
5146 * sb_start or, if that is <data_offset, it must fit before the size
5147 * of each device. If num_sectors is zero, we find the largest size
5148 * that fits.
5149 */
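/* Example (hypothetical sizes): with num_sectors == 0 and member
 * devices offering 1500000, 1000000 and 1200000 usable sectors, the
 * loop below settles on 1000000 sectors per device; asking for
 * 1100000 explicitly would fail on the smallest device.
 */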
5151 if (mddev->sync_thread)
5152 return -EBUSY;
5153 if (mddev->bitmap)
5154 /* Sorry, cannot grow a bitmap yet, just remove it,
5155 * then re-add it.
5156 */
5157 return -EBUSY;
5158 list_for_each_entry(rdev, &mddev->disks, same_set) {
5159 sector_t avail = rdev->sectors;
5161 if (fit && (num_sectors == 0 || num_sectors > avail))
5162 num_sectors = avail;
5163 if (avail < num_sectors)
5164 return -ENOSPC;
5166 rv = mddev->pers->resize(mddev, num_sectors);
5167 if (!rv)
5168 revalidate_disk(mddev->gendisk);
5169 return rv;
5172 static int update_raid_disks(mddev_t *mddev, int raid_disks)
5175 /* change the number of raid disks */
5176 if (mddev->pers->check_reshape == NULL)
5177 return -EINVAL;
5178 if (raid_disks <= 0 ||
5179 raid_disks >= mddev->max_disks)
5180 return -EINVAL;
5181 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
5182 return -EBUSY;
5183 mddev->delta_disks = raid_disks - mddev->raid_disks;
5185 rv = mddev->pers->check_reshape(mddev);
5186 return rv;
5191 * update_array_info is used to change the configuration of an
5193 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
5194 * fields in the info are checked against the array.
5195 * Any differences that cannot be handled will cause an error.
5196 * Normally, only one change can be managed at a time.
5198 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
5200 int rv = 0;
5201 int cnt = 0;
5202 int state = 0;
5204 /* calculate expected state, ignoring low bits */
5205 if (mddev->bitmap && mddev->bitmap_offset)
5206 state |= (1 << MD_SB_BITMAP_PRESENT);
5208 if (mddev->major_version != info->major_version ||
5209 mddev->minor_version != info->minor_version ||
5210 /* mddev->patch_version != info->patch_version || */
5211 mddev->ctime != info->ctime ||
5212 mddev->level != info->level ||
5213 /* mddev->layout != info->layout || */
5214 !mddev->persistent != info->not_persistent||
5215 mddev->chunk_sectors != info->chunk_size >> 9 ||
5216 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
5217 ((state^info->state) & 0xfffffe00)
5218 )
5219 return -EINVAL;
5220 /* Check there is only one change */
5221 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5222 cnt++;
5223 if (mddev->raid_disks != info->raid_disks)
5224 cnt++;
5225 if (mddev->layout != info->layout)
5226 cnt++;
5227 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
5228 cnt++;
5229 if (cnt == 0)
5230 return 0;
5231 if (cnt > 1)
5232 return -EINVAL;
5234 if (mddev->layout != info->layout) {
5236 * we don't need to do anything at the md level, the
5237 * personality will take care of it all.
5239 if (mddev->pers->check_reshape == NULL)
5240 return -EINVAL;
5241 else {
5242 mddev->new_layout = info->layout;
5243 rv = mddev->pers->check_reshape(mddev);
5244 if (rv)
5245 mddev->new_layout = mddev->layout;
5246 return rv;
5247 }
5249 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5250 rv = update_size(mddev, (sector_t)info->size * 2);
5252 if (mddev->raid_disks != info->raid_disks)
5253 rv = update_raid_disks(mddev, info->raid_disks);
5255 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
5256 if (mddev->pers->quiesce == NULL)
5257 return -EINVAL;
5258 if (mddev->recovery || mddev->sync_thread)
5259 return -EBUSY;
5260 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
5261 /* add the bitmap */
5262 if (mddev->bitmap)
5263 return -EEXIST;
5264 if (mddev->default_bitmap_offset == 0)
5265 return -EINVAL;
5266 mddev->bitmap_offset = mddev->default_bitmap_offset;
5267 mddev->pers->quiesce(mddev, 1);
5268 rv = bitmap_create(mddev);
5269 if (rv)
5270 bitmap_destroy(mddev);
5271 mddev->pers->quiesce(mddev, 0);
5273 /* remove the bitmap */
5274 if (!mddev->bitmap)
5275 return -ENOENT;
5276 if (mddev->bitmap->file)
5277 return -EINVAL;
5278 mddev->pers->quiesce(mddev, 1);
5279 bitmap_destroy(mddev);
5280 mddev->pers->quiesce(mddev, 0);
5281 mddev->bitmap_offset = 0;
5284 md_update_sb(mddev, 1);
5288 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
5292 if (mddev->pers == NULL)
5293 return -ENODEV;
5295 rdev = find_rdev(mddev, dev);
5296 if (!rdev)
5297 return -ENODEV;
5299 md_error(mddev, rdev);
5300 return 0;
5304 * We have a problem here: there is no easy way to give a CHS
5305 * virtual geometry. We currently pretend that we have a 2-head,
5306 * 4-sector geometry (with a BIG number of cylinders...). This
5307 * drives dosfs just mad... ;-)
5308 */
5309 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
5311 mddev_t *mddev = bdev->bd_disk->private_data;
5313 geo->heads = 2;
5314 geo->sectors = 4;
5315 geo->cylinders = get_capacity(mddev->gendisk) / 8;
5316 return 0;
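/*
 * Worked example of the fake geometry: heads * sectors = 2 * 4 = 8,
 * so an array of 16777216 512-byte sectors (8GiB) reports
 * 16777216 / 8 = 2097152 cylinders.
 */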
5319 static int md_ioctl(struct block_device *bdev, fmode_t mode,
5320 unsigned int cmd, unsigned long arg)
5323 void __user *argp = (void __user *)arg;
5324 mddev_t *mddev = NULL;
5326 if (!capable(CAP_SYS_ADMIN))
5327 return -EACCES;
5330 * Commands dealing with the RAID driver but not any
5336 err = get_version(argp);
5339 case PRINT_RAID_DEBUG:
5347 autostart_arrays(arg);
5354 * Commands creating/starting a new array:
5357 mddev = bdev->bd_disk->private_data;
5364 err = mddev_lock(mddev);
5365 if (err) {
5366 printk(KERN_INFO
5367 "md: ioctl lock interrupted, reason %d, cmd %d\n",
5368 err, cmd);
5369 goto abort;
5370 }
5374 case SET_ARRAY_INFO:
5376 mdu_array_info_t info;
5377 if (!arg)
5378 memset(&info, 0, sizeof(info));
5379 else if (copy_from_user(&info, argp, sizeof(info))) {
5384 err = update_array_info(mddev, &info);
5386 printk(KERN_WARNING "md: couldn't update"
5387 " array info. %d\n", err);
5392 if (!list_empty(&mddev->disks)) {
5394 "md: array %s already has disks!\n",
5399 if (mddev->raid_disks) {
5401 "md: array %s already initialised!\n",
5406 err = set_array_info(mddev, &info);
5408 printk(KERN_WARNING "md: couldn't set"
5409 " array info. %d\n", err);
5419 * Commands querying/configuring an existing array:
5421 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
5422 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
5423 if ((!mddev->raid_disks && !mddev->external)
5424 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
5425 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
5426 && cmd != GET_BITMAP_FILE) {
5432 * Commands even a read-only array can execute:
5436 case GET_ARRAY_INFO:
5437 err = get_array_info(mddev, argp);
5440 case GET_BITMAP_FILE:
5441 err = get_bitmap_file(mddev, argp);
5445 err = get_disk_info(mddev, argp);
5448 case RESTART_ARRAY_RW:
5449 err = restart_array(mddev);
5453 err = do_md_stop(mddev, 0, 1);
5457 err = do_md_stop(mddev, 1, 1);
5463 * The remaining ioctls are changing the state of the
5464 * superblock, so we do not allow them on read-only arrays.
5465 * However non-MD ioctls (e.g. get-size) will still come through
5466 * here and hit the 'default' below, so only disallow
5467 * 'md' ioctls, and switch to rw mode if started auto-readonly.
5469 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
5470 if (mddev->ro == 2) {
5471 mddev->ro = 0;
5472 sysfs_notify_dirent(mddev->sysfs_state);
5473 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5474 md_wakeup_thread(mddev->thread);
5485 mdu_disk_info_t info;
5486 if (copy_from_user(&info, argp, sizeof(info)))
5487 err = -EFAULT;
5488 else
5489 err = add_new_disk(mddev, &info);
5493 case HOT_REMOVE_DISK:
5494 err = hot_remove_disk(mddev, new_decode_dev(arg));
5498 err = hot_add_disk(mddev, new_decode_dev(arg));
5501 case SET_DISK_FAULTY:
5502 err = set_disk_faulty(mddev, new_decode_dev(arg));
5506 err = do_md_run(mddev);
5509 case SET_BITMAP_FILE:
5510 err = set_bitmap_file(mddev, (int)arg);
5520 if (mddev->hold_active == UNTIL_IOCTL &&
5521 err != -EINVAL)
5522 mddev->hold_active = 0;
5523 mddev_unlock(mddev);
5533 static int md_open(struct block_device *bdev, fmode_t mode)
5536 * Succeed if we can lock the mddev, which confirms that
5537 * it isn't being stopped right now.
5539 mddev_t *mddev = mddev_find(bdev->bd_dev);
5542 if (mddev->gendisk != bdev->bd_disk) {
5543 /* we are racing with mddev_put which is discarding this
5544 * bd_disk.
5545 */
5547 /* Wait until bdev->bd_disk is definitely gone */
5548 flush_scheduled_work();
5549 /* Then retry the open from the top */
5550 return -ERESTARTSYS;
5552 BUG_ON(mddev != bdev->bd_disk->private_data);
5554 if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
5555 goto out;
5557 err = 0;
5558 atomic_inc(&mddev->openers);
5559 mutex_unlock(&mddev->open_mutex);
5561 check_disk_change(bdev);
5566 static int md_release(struct gendisk *disk, fmode_t mode)
5568 mddev_t *mddev = disk->private_data;
5571 atomic_dec(&mddev->openers);
5577 static int md_media_changed(struct gendisk *disk)
5579 mddev_t *mddev = disk->private_data;
5581 return mddev->changed;
5584 static int md_revalidate(struct gendisk *disk)
5586 mddev_t *mddev = disk->private_data;
5588 mddev->changed = 0;
5589 return 0;
5591 static const struct block_device_operations md_fops =
5593 .owner = THIS_MODULE,
5594 .open = md_open,
5595 .release = md_release,
5596 .ioctl = md_ioctl,
5597 .getgeo = md_getgeo,
5598 .media_changed = md_media_changed,
5599 .revalidate_disk= md_revalidate,
5602 static int md_thread(void * arg)
5604 mdk_thread_t *thread = arg;
5607 * md_thread is a 'system-thread', its priority should be very
5608 * high. We avoid resource deadlocks individually in each
5609 * raid personality. (RAID5 does preallocation) We also use RR and
5610 * the very same RT priority as kswapd, thus we will never get
5611 * into a priority inversion deadlock.
5613 * we definitely have to have equal or higher priority than
5614 * bdflush, otherwise bdflush will deadlock if there are too
5615 * many dirty RAID5 blocks.
5618 allow_signal(SIGKILL);
5619 while (!kthread_should_stop()) {
5621 /* We need to wait INTERRUPTIBLE so that
5622 * we don't add to the load-average.
5623 * That means we need to be sure no signals are
5624 * pending.
5625 */
5626 if (signal_pending(current))
5627 flush_signals(current);
5629 wait_event_interruptible_timeout
5630 (thread->wqueue,
5631 test_bit(THREAD_WAKEUP, &thread->flags)
5632 || kthread_should_stop(),
5633 thread->timeout);
5635 clear_bit(THREAD_WAKEUP, &thread->flags);
5637 thread->run(thread->mddev);
5643 void md_wakeup_thread(mdk_thread_t *thread)
5646 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
5647 set_bit(THREAD_WAKEUP, &thread->flags);
5648 wake_up(&thread->wqueue);
5652 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
5655 mdk_thread_t *thread;
5657 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
5658 if (!thread)
5659 return NULL;
5661 init_waitqueue_head(&thread->wqueue);
5664 thread->mddev = mddev;
5665 thread->timeout = MAX_SCHEDULE_TIMEOUT;
5666 thread->tsk = kthread_run(md_thread, thread,
5667 "%s_%s",
5668 mdname(thread->mddev),
5669 name ?: mddev->pers->name);
5670 if (IS_ERR(thread->tsk)) {
5671 kfree(thread);
5672 return NULL;
5673 }
5674 return thread;
5677 void md_unregister_thread(mdk_thread_t *thread)
5681 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
5683 kthread_stop(thread->tsk);
5684 kfree(thread);
5687 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
5694 if (!rdev || test_bit(Faulty, &rdev->flags))
5695 return;
5697 if (mddev->external)
5698 set_bit(Blocked, &rdev->flags);
5700 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
5702 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
5703 __builtin_return_address(0),__builtin_return_address(1),
5704 __builtin_return_address(2),__builtin_return_address(3));
5708 if (!mddev->pers->error_handler)
5709 return;
5710 mddev->pers->error_handler(mddev,rdev);
5711 if (mddev->degraded)
5712 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5713 set_bit(StateChanged, &rdev->flags);
5714 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5715 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5716 md_wakeup_thread(mddev->thread);
5717 md_new_event_inintr(mddev);
5720 /* seq_file implementation /proc/mdstat */
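/*
 * Typical output assembled by the code below (illustrative only;
 * exact fields depend on the array and personality):
 *
 *	Personalities : [raid1] [raid5]
 *	md0 : active raid1 sdb1[1] sda1[0]
 *	      1048512 blocks [2/2] [UU]
 *	      [====>................]  resync = 24.9% (261568/1048512)
 *	      finish=0.5min speed=26156K/sec
 *
 *	unused devices: <none>
 */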
5722 static void status_unused(struct seq_file *seq)
5727 seq_printf(seq, "unused devices: ");
5729 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
5730 char b[BDEVNAME_SIZE];
5732 seq_printf(seq, "%s ",
5733 bdevname(rdev->bdev,b));
5736 seq_printf(seq, "<none>");
5738 seq_printf(seq, "\n");
5742 static void status_resync(struct seq_file *seq, mddev_t * mddev)
5744 sector_t max_sectors, resync, res;
5745 unsigned long dt, db;
5748 unsigned int per_milli;
5750 resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
5752 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5753 max_sectors = mddev->resync_max_sectors;
5755 max_sectors = mddev->dev_sectors;
5757 /*
5758 * Should not happen.
5759 */
5760 if (max_sectors == 0) {
5761 MD_BUG();
5762 return;
5763 }
5764 /* Pick 'scale' such that (resync>>scale)*1000 will fit
5765 * in a sector_t, and (max_sectors>>scale) will fit in a
5766 * u32, as those are the requirements for sector_div.
5767 * Thus 'scale' must be at least 10
5768 */
5769 scale = 10;
5770 if (sizeof(sector_t) > sizeof(unsigned long)) {
5771 while ( max_sectors/2 > (1ULL<<(scale+32)))
5772 scale++;
5773 }
5774 res = (resync>>scale)*1000;
5775 sector_div(res, (u32)((max_sectors>>scale)+1));
5776 per_milli = res;
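/* Worked example: for max_sectors = 2^40 (a 512TiB array) the loop
 * above leaves scale at 10, since max_sectors/2 = 2^39 <= 2^42;
 * (max_sectors >> 10) = 2^30 fits a u32 and (resync >> 10) * 1000
 * cannot overflow a 64-bit sector_t.
 */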
5779 int i, x = per_milli/50, y = 20-x;
5780 seq_printf(seq, "[");
5781 for (i = 0; i < x; i++)
5782 seq_printf(seq, "=");
5783 seq_printf(seq, ">");
5784 for (i = 0; i < y; i++)
5785 seq_printf(seq, ".");
5786 seq_printf(seq, "] ");
5788 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
5789 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
5791 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
5793 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
5794 "resync" : "recovery"))),
5795 per_milli/10, per_milli % 10,
5796 (unsigned long long) resync/2,
5797 (unsigned long long) max_sectors/2);
5800 * dt: time from mark until now
5801 * db: blocks written from mark until now
5802 * rt: remaining time
5804 * rt is a sector_t, so could be 32bit or 64bit.
5805 * So we divide before multiply in case it is 32bit and close
5806 * to the limit of sector_t.
5807 * We scale the divisor (db) by 32 to avoid losing precision
5808 * near the end of resync when the number of remaining sectors
5809 * is close to the 'window' size.
5810 * We then divide rt by 32 after multiplying by db to compensate.
5811 * The '+1' avoids division by zero if db is very small.
5812 */
5813 dt = ((jiffies - mddev->resync_mark) / HZ);
5814 if (!dt) dt++;
5815 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
5816 - mddev->resync_mark_cnt;
5818 rt = max_sectors - resync; /* number of remaining sectors */
5819 sector_div(rt, db/32+1);
5820 rt *= dt;
5821 rt >>= 5;
5823 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
5824 ((unsigned long)rt % 60)/6);
5826 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
5829 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
5831 struct list_head *tmp;
5841 spin_lock(&all_mddevs_lock);
5842 list_for_each(tmp,&all_mddevs)
5844 mddev = list_entry(tmp, mddev_t, all_mddevs);
5846 spin_unlock(&all_mddevs_lock);
5849 spin_unlock(&all_mddevs_lock);
5851 return (void*)2; /* tail */
5855 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
5857 struct list_head *tmp;
5858 mddev_t *next_mddev, *mddev = v;
5864 spin_lock(&all_mddevs_lock);
5865 if (v == (void*)1)
5866 tmp = all_mddevs.next;
5867 else
5868 tmp = mddev->all_mddevs.next;
5869 if (tmp != &all_mddevs)
5870 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
5871 else {
5872 next_mddev = (void*)2;
5873 *pos = 0x10000;
5874 }
5875 spin_unlock(&all_mddevs_lock);
5883 static void md_seq_stop(struct seq_file *seq, void *v)
5887 if (mddev && v != (void*)1 && v != (void*)2)
5888 mddev_put(mddev);
5891 struct mdstat_info {
5892 int event;
5893 };
5895 static int md_seq_show(struct seq_file *seq, void *v)
5900 struct mdstat_info *mi = seq->private;
5901 struct bitmap *bitmap;
5903 if (v == (void*)1) {
5904 struct mdk_personality *pers;
5905 seq_printf(seq, "Personalities : ");
5906 spin_lock(&pers_lock);
5907 list_for_each_entry(pers, &pers_list, list)
5908 seq_printf(seq, "[%s] ", pers->name);
5910 spin_unlock(&pers_lock);
5911 seq_printf(seq, "\n");
5912 mi->event = atomic_read(&md_event_count);
5915 if (v == (void*)2) {
5916 status_unused(seq);
5917 return 0;
5918 }
5920 if (mddev_lock(mddev) < 0)
5921 return -EINTR;
5923 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
5924 seq_printf(seq, "%s : %sactive", mdname(mddev),
5925 mddev->pers ? "" : "in");
5928 seq_printf(seq, " (read-only)");
5930 seq_printf(seq, " (auto-read-only)");
5931 seq_printf(seq, " %s", mddev->pers->name);
5935 list_for_each_entry(rdev, &mddev->disks, same_set) {
5936 char b[BDEVNAME_SIZE];
5937 seq_printf(seq, " %s[%d]",
5938 bdevname(rdev->bdev,b), rdev->desc_nr);
5939 if (test_bit(WriteMostly, &rdev->flags))
5940 seq_printf(seq, "(W)");
5941 if (test_bit(Faulty, &rdev->flags)) {
5942 seq_printf(seq, "(F)");
5944 } else if (rdev->raid_disk < 0)
5945 seq_printf(seq, "(S)"); /* spare */
5946 sectors += rdev->sectors;
5949 if (!list_empty(&mddev->disks)) {
5950 if (mddev->pers)
5951 seq_printf(seq, "\n %llu blocks",
5952 (unsigned long long)
5953 mddev->array_sectors / 2);
5955 seq_printf(seq, "\n %llu blocks",
5956 (unsigned long long)sectors / 2);
5958 if (mddev->persistent) {
5959 if (mddev->major_version != 0 ||
5960 mddev->minor_version != 90) {
5961 seq_printf(seq," super %d.%d",
5962 mddev->major_version,
5963 mddev->minor_version);
5965 } else if (mddev->external)
5966 seq_printf(seq, " super external:%s",
5967 mddev->metadata_type);
5969 seq_printf(seq, " super non-persistent");
5971 if (mddev->pers) {
5972 mddev->pers->status(seq, mddev);
5973 seq_printf(seq, "\n ");
5974 if (mddev->pers->sync_request) {
5975 if (mddev->curr_resync > 2) {
5976 status_resync(seq, mddev);
5977 seq_printf(seq, "\n ");
5978 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
5979 seq_printf(seq, "\tresync=DELAYED\n ");
5980 else if (mddev->recovery_cp < MaxSector)
5981 seq_printf(seq, "\tresync=PENDING\n ");
5984 seq_printf(seq, "\n ");
5986 if ((bitmap = mddev->bitmap)) {
5987 unsigned long chunk_kb;
5988 unsigned long flags;
5989 spin_lock_irqsave(&bitmap->lock, flags);
5990 chunk_kb = bitmap->chunksize >> 10;
5991 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
5993 bitmap->pages - bitmap->missing_pages,
5995 (bitmap->pages - bitmap->missing_pages)
5996 << (PAGE_SHIFT - 10),
5997 chunk_kb ? chunk_kb : bitmap->chunksize,
5998 chunk_kb ? "KB" : "B");
6000 seq_printf(seq, ", file: ");
6001 seq_path(seq, &bitmap->file->f_path, " \t\n");
6004 seq_printf(seq, "\n");
6005 spin_unlock_irqrestore(&bitmap->lock, flags);
6008 seq_printf(seq, "\n");
6010 mddev_unlock(mddev);
6015 static const struct seq_operations md_seq_ops = {
6016 .start = md_seq_start,
6017 .next = md_seq_next,
6018 .stop = md_seq_stop,
6019 .show = md_seq_show,
6022 static int md_seq_open(struct inode *inode, struct file *file)
6025 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
6026 if (mi == NULL)
6027 return -ENOMEM;
6029 error = seq_open(file, &md_seq_ops);
6030 if (error)
6031 kfree(mi);
6032 else {
6033 struct seq_file *p = file->private_data;
6034 p->private = mi;
6035 mi->event = atomic_read(&md_event_count);
6036 }
6037 return error;
6040 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
6042 struct seq_file *m = filp->private_data;
6043 struct mdstat_info *mi = m->private;
6046 poll_wait(filp, &md_event_waiters, wait);
6048 /* always allow read */
6049 mask = POLLIN | POLLRDNORM;
6051 if (mi->event != atomic_read(&md_event_count))
6052 mask |= POLLERR | POLLPRI;
6053 return mask;
6056 static const struct file_operations md_seq_fops = {
6057 .owner = THIS_MODULE,
6058 .open = md_seq_open,
6059 .read = seq_read,
6060 .llseek = seq_lseek,
6061 .release = seq_release_private,
6062 .poll = mdstat_poll,
6063 };
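/*
 * Userspace sketch (illustrative, not part of this file): a monitor
 * can wait for array events by poll()ing /proc/mdstat for the
 * POLLPRI bit set above whenever md_event_count advances:
 *
 *	char buf[4096];
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	read(fd, buf, sizeof(buf));	// consume current contents
 *	poll(&pfd, 1, -1);		// wakes on the next md event
 */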
6065 int register_md_personality(struct mdk_personality *p)
6067 spin_lock(&pers_lock);
6068 list_add_tail(&p->list, &pers_list);
6069 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
6070 spin_unlock(&pers_lock);
6071 return 0;
6074 int unregister_md_personality(struct mdk_personality *p)
6076 printk(KERN_INFO "md: %s personality unregistered\n", p->name);
6077 spin_lock(&pers_lock);
6078 list_del_init(&p->list);
6079 spin_unlock(&pers_lock);
6080 return 0;
6083 static int is_mddev_idle(mddev_t *mddev, int init)
6085 mdk_rdev_t * rdev;
6086 int idle;
6087 int curr_events;
6089 idle = 1;
6090 rcu_read_lock();
6091 rdev_for_each_rcu(rdev, mddev) {
6092 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
6093 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
6094 (int)part_stat_read(&disk->part0, sectors[1]) -
6095 atomic_read(&disk->sync_io);
6096 /* sync IO will cause sync_io to increase before the disk_stats
6097 * as sync_io is counted when a request starts, and
6098 * disk_stats is counted when it completes.
6099 * So resync activity will cause curr_events to be smaller than
6100 * when there was no such activity.
6101 * non-sync IO will cause disk_stat to increase without
6102 * increasing sync_io so curr_events will (eventually)
6103 * be larger than it was before. Once it becomes
6104 * substantially larger, the test below will cause
6105 * the array to appear non-idle, and resync will slow down.
6107 * If there is a lot of outstanding resync activity when
6108 * we set last_event to curr_events, then all that activity
6109 * completing might cause the array to appear non-idle
6110 * and resync will be slowed down even though there might
6111 * not have been non-resync activity. This will only
6112 * happen once though. 'last_events' will soon reflect
6113 * the state where there is little or no outstanding
6114 * resync requests, and further resync activity will
6115 * always make curr_events less than last_events.
6118 if (init || curr_events - rdev->last_events > 64) {
6119 rdev->last_events = curr_events;
6120 idle = 0;
6121 }
6122 }
6123 rcu_read_unlock();
6124 return idle;
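/* Example: a burst of more than 32KiB (64 sectors) of ordinary,
 * non-resync I/O bumps curr_events past last_events without touching
 * sync_io, trips the test above, and the array stops being treated
 * as idle until the counters settle again.
 */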
6127 void md_done_sync(mddev_t *mddev, int blocks, int ok)
6129 /* another "blocks" (512byte) blocks have been synced */
6130 atomic_sub(blocks, &mddev->recovery_active);
6131 wake_up(&mddev->recovery_wait);
6132 if (!ok) {
6133 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6134 md_wakeup_thread(mddev->thread);
6135 /* stop recovery, signal do_sync ... */
6140 /* md_write_start(mddev, bi)
6141 * If we need to update some array metadata (e.g. 'active' flag
6142 * in superblock) before writing, schedule a superblock update
6143 * and wait for it to complete.
6145 void md_write_start(mddev_t *mddev, struct bio *bi)
6147 int did_change = 0;
6148 if (bio_data_dir(bi) != WRITE)
6149 return;
6151 BUG_ON(mddev->ro == 1);
6152 if (mddev->ro == 2) {
6153 /* need to switch to read/write */
6154 mddev->ro = 0;
6155 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6156 md_wakeup_thread(mddev->thread);
6157 md_wakeup_thread(mddev->sync_thread);
6160 atomic_inc(&mddev->writes_pending);
6161 if (mddev->safemode == 1)
6162 mddev->safemode = 0;
6163 if (mddev->in_sync) {
6164 spin_lock_irq(&mddev->write_lock);
6165 if (mddev->in_sync) {
6166 mddev->in_sync = 0;
6167 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6168 md_wakeup_thread(mddev->thread);
6169 did_change = 1;
6171 spin_unlock_irq(&mddev->write_lock);
6173 if (did_change)
6174 sysfs_notify_dirent(mddev->sysfs_state);
6175 wait_event(mddev->sb_wait,
6176 !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
6177 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6180 void md_write_end(mddev_t *mddev)
6182 if (atomic_dec_and_test(&mddev->writes_pending)) {
6183 if (mddev->safemode == 2)
6184 md_wakeup_thread(mddev->thread);
6185 else if (mddev->safemode_delay)
6186 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
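/*
 * Sketch of the intended pairing (illustrative): a personality's
 * make_request path brackets every array write with these calls:
 *
 *	md_write_start(mddev, bio);	// may block marking array active
 *	... queue and submit the write ...
 *	md_write_end(mddev);		// when the write is done
 */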
6190 /* md_allow_write(mddev)
6191 * Calling this ensures that the array is marked 'active' so that writes
6192 * may proceed without blocking. It is important to call this before
6193 * attempting a GFP_KERNEL allocation while holding the mddev lock.
6194 * Must be called with mddev_lock held.
6196 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
6197 * is dropped, so return -EAGAIN after notifying userspace.
6199 int md_allow_write(mddev_t *mddev)
6205 if (!mddev->pers->sync_request)
6206 return 0;
6208 spin_lock_irq(&mddev->write_lock);
6209 if (mddev->in_sync) {
6210 mddev->in_sync = 0;
6211 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6212 if (mddev->safemode_delay &&
6213 mddev->safemode == 0)
6214 mddev->safemode = 1;
6215 spin_unlock_irq(&mddev->write_lock);
6216 md_update_sb(mddev, 0);
6217 sysfs_notify_dirent(mddev->sysfs_state);
6218 } else
6219 spin_unlock_irq(&mddev->write_lock);
6221 if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
6222 return -EAGAIN;
6223 else
6224 return 0;
6226 EXPORT_SYMBOL_GPL(md_allow_write);
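/*
 * Illustrative caller (hypothetical), with the mddev lock held:
 *
 *	err = md_allow_write(mddev);	// 0, or -EAGAIN when ->external
 *	if (err)
 *		return err;
 *	ptr = kmalloc(size, GFP_KERNEL);	// safe: no sb-update deadlock
 */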
6228 #define SYNC_MARKS 10
6229 #define SYNC_MARK_STEP (3*HZ)
6230 void md_do_sync(mddev_t *mddev)
6233 unsigned int currspeed = 0,
6235 sector_t max_sectors,j, io_sectors;
6236 unsigned long mark[SYNC_MARKS];
6237 sector_t mark_cnt[SYNC_MARKS];
6239 struct list_head *tmp;
6240 sector_t last_check;
6245 /* just in case thread restarts... */
6246 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
6248 if (mddev->ro) /* never try to sync a read-only array */
6251 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6252 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
6253 desc = "data-check";
6254 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6255 desc = "requested-resync";
6258 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6263 /* we overload curr_resync somewhat here.
6264 * 0 == not engaged in resync at all
6265 * 2 == checking that there is no conflict with another sync
6266 * 1 == like 2, but have yielded to allow conflicting resync to
6267 * commence
6268 * other == active in resync - this many blocks
6270 * Before starting a resync we must have set curr_resync to
6271 * 2, and then checked that every "conflicting" array has curr_resync
6272 * less than ours. When we find one that is the same or higher
6273 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
6274 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
6275 * This will mean we have to start checking from the beginning again.
6276 */
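/* Example of the yield protocol: arrays A and B share a disk and both
 * reach this loop with curr_resync == 2.  If A's mddev address is the
 * lower one, A drops curr_resync to 1 and waits on resync_wait; B then
 * sees A's 1 < its own 2, does not wait, and resyncs first.
 */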
6280 mddev->curr_resync = 2;
6283 if (kthread_should_stop()) {
6284 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6287 for_each_mddev(mddev2, tmp) {
6288 if (mddev2 == mddev)
6290 if (!mddev->parallel_resync
6291 && mddev2->curr_resync
6292 && match_mddev_units(mddev, mddev2)) {
6294 if (mddev < mddev2 && mddev->curr_resync == 2) {
6295 /* arbitrarily yield */
6296 mddev->curr_resync = 1;
6297 wake_up(&resync_wait);
6299 if (mddev > mddev2 && mddev->curr_resync == 1)
6300 /* no need to wait here, we can wait the next
6301 * time 'round when curr_resync == 2
6302 */
6303 continue;
6304 /* We need to wait 'interruptible' so as not to
6305 * contribute to the load average, and not to
6306 * be caught by 'softlockup'
6308 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
6309 if (!kthread_should_stop() &&
6310 mddev2->curr_resync >= mddev->curr_resync) {
6311 printk(KERN_INFO "md: delaying %s of %s"
6312 " until %s has finished (they"
6313 " share one or more physical units)\n",
6314 desc, mdname(mddev), mdname(mddev2));
6316 if (signal_pending(current))
6317 flush_signals(current);
6319 finish_wait(&resync_wait, &wq);
6322 finish_wait(&resync_wait, &wq);
6325 } while (mddev->curr_resync < 2);
6328 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6329 /* resync follows the size requested by the personality,
6330 * which defaults to physical size, but can be virtual size
6332 max_sectors = mddev->resync_max_sectors;
6333 mddev->resync_mismatches = 0;
6334 /* we don't use the checkpoint if there's a bitmap */
6335 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6336 j = mddev->resync_min;
6337 else if (!mddev->bitmap)
6338 j = mddev->recovery_cp;
6340 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6341 max_sectors = mddev->dev_sectors;
6343 /* recovery follows the physical size of devices */
6344 max_sectors = mddev->dev_sectors;
6346 list_for_each_entry(rdev, &mddev->disks, same_set)
6347 if (rdev->raid_disk >= 0 &&
6348 !test_bit(Faulty, &rdev->flags) &&
6349 !test_bit(In_sync, &rdev->flags) &&
6350 rdev->recovery_offset < j)
6351 j = rdev->recovery_offset;
6354 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
6355 printk(KERN_INFO "md: minimum _guaranteed_ speed:"
6356 " %d KB/sec/disk.\n", speed_min(mddev));
6357 printk(KERN_INFO "md: using maximum available idle IO bandwidth "
6358 "(but not more than %d KB/sec) for %s.\n",
6359 speed_max(mddev), desc);
6361 is_mddev_idle(mddev, 1); /* this initializes IO event counters */
6364 for (m = 0; m < SYNC_MARKS; m++) {
6365 mark[m] = jiffies;
6366 mark_cnt[m] = io_sectors;
6369 mddev->resync_mark = mark[last_mark];
6370 mddev->resync_mark_cnt = mark_cnt[last_mark];
6373 * Tune reconstruction:
6375 window = 32*(PAGE_SIZE/512);
6376 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
6377 window/2,(unsigned long long) max_sectors/2);
6379 atomic_set(&mddev->recovery_active, 0);
6380 last_check = 0;
6382 if (j > 2) {
6383 printk(KERN_INFO
6384 "md: resuming %s of %s from checkpoint.\n",
6385 desc, mdname(mddev));
6386 mddev->curr_resync = j;
6389 while (j < max_sectors) {
6394 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
6395 ((mddev->curr_resync > mddev->curr_resync_completed &&
6396 (mddev->curr_resync - mddev->curr_resync_completed)
6397 > (max_sectors >> 4)) ||
6398 (j - mddev->curr_resync_completed)*2
6399 >= mddev->resync_max - mddev->curr_resync_completed
6401 /* time to update curr_resync_completed */
6402 blk_unplug(mddev->queue);
6403 wait_event(mddev->recovery_wait,
6404 atomic_read(&mddev->recovery_active) == 0);
6405 mddev->curr_resync_completed =
6406 mddev->curr_resync;
6407 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6408 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6411 while (j >= mddev->resync_max && !kthread_should_stop()) {
6412 /* As this condition is controlled by user-space,
6413 * we can block indefinitely, so use '_interruptible'
6414 * to avoid triggering warnings.
6416 flush_signals(current); /* just in case */
6417 wait_event_interruptible(mddev->recovery_wait,
6418 mddev->resync_max > j
6419 || kthread_should_stop());
6422 if (kthread_should_stop())
6425 sectors = mddev->pers->sync_request(mddev, j, &skipped,
6426 currspeed < speed_min(mddev));
6427 if (sectors == 0) {
6428 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6429 goto out;
6430 }
6432 if (!skipped) { /* actual IO requested */
6433 io_sectors += sectors;
6434 atomic_add(sectors, &mddev->recovery_active);
6437 j += sectors;
6438 if (j > 1) mddev->curr_resync = j;
6439 mddev->curr_mark_cnt = io_sectors;
6440 if (last_check == 0)
6441 /* this is the earliest that rebuild will be
6442 * visible in /proc/mdstat
6443 */
6444 md_new_event(mddev);
6446 if (last_check + window > io_sectors || j == max_sectors)
6447 continue;
6449 last_check = io_sectors;
6451 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6453 repeat:
6455 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
6457 int next = (last_mark+1) % SYNC_MARKS;
6459 mddev->resync_mark = mark[next];
6460 mddev->resync_mark_cnt = mark_cnt[next];
6461 mark[next] = jiffies;
6462 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
6463 last_mark = next;
6467 if (kthread_should_stop())
6468 goto interrupted;
6472 * this loop exits only when we are slower than
6473 * the 'hard' speed limit, or the system was IO-idle for
6474 * a jiffy.
6475 * the system might be non-idle CPU-wise, but we only care
6476 * about not overloading the IO subsystem. (things like an
6477 * e2fsck being done on the RAID array should execute fast)
6478 */
6479 blk_unplug(mddev->queue);
6482 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
6483 /((jiffies-mddev->resync_mark)/HZ +1) +1;
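/* e.g. 409600 sectors beyond the resync mark after 100 seconds gives
 * currspeed = 204800/101 + 1 ~= 2028 KB/sec, which is compared against
 * speed_min()/speed_max() below.
 */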
6485 if (currspeed > speed_min(mddev)) {
6486 if ((currspeed > speed_max(mddev)) ||
6487 !is_mddev_idle(mddev, 0)) {
6488 msleep(500);
6489 goto repeat;
6490 }
6493 printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
6495 * this also signals 'finished resyncing' to md_stop
6497 out:
6498 blk_unplug(mddev->queue);
6500 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
6502 /* tell personality that we are finished */
6503 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
6505 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
6506 mddev->curr_resync > 2) {
6507 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6508 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6509 if (mddev->curr_resync >= mddev->recovery_cp) {
6511 "md: checkpointing %s of %s.\n",
6512 desc, mdname(mddev));
6513 mddev->recovery_cp = mddev->curr_resync;
6515 } else
6516 mddev->recovery_cp = MaxSector;
6518 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6519 mddev->curr_resync = MaxSector;
6520 list_for_each_entry(rdev, &mddev->disks, same_set)
6521 if (rdev->raid_disk >= 0 &&
6522 !test_bit(Faulty, &rdev->flags) &&
6523 !test_bit(In_sync, &rdev->flags) &&
6524 rdev->recovery_offset < mddev->curr_resync)
6525 rdev->recovery_offset = mddev->curr_resync;
6528 set_bit(MD_CHANGE_DEVS, &mddev->flags);
6531 mddev->curr_resync = 0;
6532 mddev->curr_resync_completed = 0;
6533 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6534 /* We completed so max setting can be forgotten. */
6535 mddev->resync_max = MaxSector;
6536 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6537 wake_up(&resync_wait);
6538 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
6539 md_wakeup_thread(mddev->thread);
6542 interrupted:
6543 /*
6544 * got a signal, exit.
6545 */
6546 printk(KERN_INFO
6547 "md: md_do_sync() got signal ... exiting\n");
6548 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6549 goto out;
6552 EXPORT_SYMBOL_GPL(md_do_sync);
6555 static int remove_and_add_spares(mddev_t *mddev)
6560 mddev->curr_resync_completed = 0;
6562 list_for_each_entry(rdev, &mddev->disks, same_set)
6563 if (rdev->raid_disk >= 0 &&
6564 !test_bit(Blocked, &rdev->flags) &&
6565 (test_bit(Faulty, &rdev->flags) ||
6566 ! test_bit(In_sync, &rdev->flags)) &&
6567 atomic_read(&rdev->nr_pending)==0) {
6568 if (mddev->pers->hot_remove_disk(
6569 mddev, rdev->raid_disk)==0) {
6571 sprintf(nm,"rd%d", rdev->raid_disk);
6572 sysfs_remove_link(&mddev->kobj, nm);
6573 rdev->raid_disk = -1;
6577 if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
6578 list_for_each_entry(rdev, &mddev->disks, same_set) {
6579 if (rdev->raid_disk >= 0 &&
6580 !test_bit(In_sync, &rdev->flags) &&
6581 !test_bit(Blocked, &rdev->flags))
6583 if (rdev->raid_disk < 0
6584 && !test_bit(Faulty, &rdev->flags)) {
6585 rdev->recovery_offset = 0;
6587 hot_add_disk(mddev, rdev) == 0) {
6589 sprintf(nm, "rd%d", rdev->raid_disk);
6590 if (sysfs_create_link(&mddev->kobj,
6593 "md: cannot register "
6597 md_new_event(mddev);
6606 * This routine is regularly called by all per-raid-array threads to
6607 * deal with generic issues like resync and super-block update.
6608 * Raid personalities that don't have a thread (linear/raid0) do not
6609 * need this as they never do any recovery or update the superblock.
6611 * It does not do any resync itself, but rather "forks" off other threads
6612 * to do that as needed.
6613 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
6614 * "->recovery" and create a thread at ->sync_thread.
6615 * When the thread finishes it sets MD_RECOVERY_DONE
6616 * and wakes up this thread, which will reap the thread and finish up.
6617 * This thread also removes any faulty devices (with nr_pending == 0).
6619 * The overall approach is:
6620 * 1/ if the superblock needs updating, update it.
6621 * 2/ If a recovery thread is running, don't do anything else.
6622 * 3/ If recovery has finished, clean up, possibly marking spares active.
6623 * 4/ If there are any faulty devices, remove them.
6624 * 5/ If array is degraded, try to add spare devices.
6625 * 6/ If array has spares or is not in-sync, start a resync thread.
6626 * (A typical caller is sketched below.)
6627 */
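/*
 * Hedged sketch of a typical caller: each personality's per-array
 * daemon thread invokes this on every wakeup before its own work,
 * e.g. (illustrative):
 *
 *	static void raid1d(mddev_t *mddev)
 *	{
 *		md_check_recovery(mddev);
 *		... retry failed reads, flush pending writes ...
 *	}
 */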
6627 void md_check_recovery(mddev_t *mddev)
6632 if (mddev->bitmap)
6633 bitmap_daemon_work(mddev);
6638 if (signal_pending(current)) {
6639 if (mddev->pers->sync_request && !mddev->external) {
6640 printk(KERN_INFO "md: %s in immediate safe mode\n",
6642 mddev->safemode = 2;
6644 flush_signals(current);
6647 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
6648 return;
6649 if ( ! (
6650 (mddev->flags && !mddev->external) ||
6651 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
6652 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
6653 (mddev->external == 0 && mddev->safemode == 1) ||
6654 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
6655 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
6656 ))
6657 return;
6659 if (mddev_trylock(mddev)) {
6663 /* Only thing we do on a ro array is remove
6664 * failed devices.
6665 */
6666 remove_and_add_spares(mddev);
6667 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6671 if (!mddev->external) {
6673 spin_lock_irq(&mddev->write_lock);
6674 if (mddev->safemode &&
6675 !atomic_read(&mddev->writes_pending) &&
6677 mddev->recovery_cp == MaxSector) {
6680 if (mddev->persistent)
6681 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6683 if (mddev->safemode == 1)
6684 mddev->safemode = 0;
6685 spin_unlock_irq(&mddev->write_lock);
6687 sysfs_notify_dirent(mddev->sysfs_state);
6690 if (mddev->flags)
6691 md_update_sb(mddev, 0);
6693 list_for_each_entry(rdev, &mddev->disks, same_set)
6694 if (test_and_clear_bit(StateChanged, &rdev->flags))
6695 sysfs_notify_dirent(rdev->sysfs_state);
6698 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
6699 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
6700 /* resync/recovery still happening */
6701 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6702 goto unlock;
6704 if (mddev->sync_thread) {
6705 /* resync has finished, collect result */
6706 md_unregister_thread(mddev->sync_thread);
6707 mddev->sync_thread = NULL;
6708 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
6709 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
6711 /* activate any spares */
6712 if (mddev->pers->spare_active(mddev))
6713 sysfs_notify(&mddev->kobj, NULL,
6716 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
6717 mddev->pers->finish_reshape)
6718 mddev->pers->finish_reshape(mddev);
6719 md_update_sb(mddev, 1);
6721 /* if array is no longer degraded, then any saved_raid_disk
6722 * information must be scrapped
6724 if (!mddev->degraded)
6725 list_for_each_entry(rdev, &mddev->disks, same_set)
6726 rdev->saved_raid_disk = -1;
6728 mddev->recovery = 0;
6729 /* flag recovery needed just to double check */
6730 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6731 sysfs_notify_dirent(mddev->sysfs_action);
6732 md_new_event(mddev);
6735 /* Set RUNNING before clearing NEEDED to avoid
6736 * any transients in the value of "sync_action".
6738 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6739 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6740 /* Clear some bits that don't mean anything, but
6741 * might be left set
6742 */
6743 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
6744 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
6746 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
6748 /* no recovery is running.
6749 * remove any failed drives, then
6750 * add spares if possible.
6751 * Spares are also removed and re-added, to allow
6752 * the personality to fail the re-add.
6755 if (mddev->reshape_position != MaxSector) {
6756 if (mddev->pers->check_reshape == NULL ||
6757 mddev->pers->check_reshape(mddev) != 0)
6758 /* Cannot proceed */
6760 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
6761 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6762 } else if ((spares = remove_and_add_spares(mddev))) {
6763 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6764 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
6765 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
6766 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6767 } else if (mddev->recovery_cp < MaxSector) {
6768 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6769 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6770 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
6771 /* nothing to be done ... */
6772 goto unlock;
6774 if (mddev->pers->sync_request) {
6775 if (spares && mddev->bitmap && ! mddev->bitmap->file) {
6776 /* We are adding a device or devices to an array
6777 * which has the bitmap stored on all devices.
6778 * So make sure all bitmap pages get written
6780 bitmap_write_all(mddev->bitmap);
6782 mddev->sync_thread = md_register_thread(md_do_sync,
6785 if (!mddev->sync_thread) {
6786 printk(KERN_ERR "%s: could not start resync"
6789 /* leave the spares where they are, it shouldn't hurt */
6790 mddev->recovery = 0;
6792 md_wakeup_thread(mddev->sync_thread);
6793 sysfs_notify_dirent(mddev->sysfs_action);
6794 md_new_event(mddev);
6796 unlock:
6797 if (!mddev->sync_thread) {
6798 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6799 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
6801 if (mddev->sysfs_action)
6802 sysfs_notify_dirent(mddev->sysfs_action);
6804 mddev_unlock(mddev);
6808 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
6810 sysfs_notify_dirent(rdev->sysfs_state);
6811 wait_event_timeout(rdev->blocked_wait,
6812 !test_bit(Blocked, &rdev->flags),
6813 msecs_to_jiffies(5000));
6814 rdev_dec_pending(rdev, mddev);
6816 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
6818 static int md_notify_reboot(struct notifier_block *this,
6819 unsigned long code, void *x)
6821 struct list_head *tmp;
6824 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
6826 printk(KERN_INFO "md: stopping all md devices.\n");
6828 for_each_mddev(mddev, tmp)
6829 if (mddev_trylock(mddev)) {
6830 /* Force a switch to readonly even if the array
6831 * appears to still be in use. Hence the '100'.
6832 */
6834 do_md_stop(mddev, 1, 100);
6835 mddev_unlock(mddev);
6838 * certain more exotic SCSI devices are known to be
6839 * volatile wrt too early system reboots. While the
6840 * right place to handle this issue is the given
6841 * driver, we do want to have a safe RAID driver ...
6848 static struct notifier_block md_notifier = {
6849 .notifier_call = md_notify_reboot,
6851 .priority = INT_MAX, /* before any real devices */
6854 static void md_geninit(void)
6856 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
6858 proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
6861 static int __init md_init(void)
6863 if (register_blkdev(MD_MAJOR, "md"))
6865 if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
6866 unregister_blkdev(MD_MAJOR, "md");
6869 blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
6870 md_probe, NULL, NULL);
6871 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
6872 md_probe, NULL, NULL);
6874 register_reboot_notifier(&md_notifier);
6875 raid_table_header = register_sysctl_table(raid_root_table);
6877 md_geninit();
6878 return 0;
6882 #ifndef MODULE
6884 /*
6885 * Searches all registered partitions for autorun RAID arrays
6886 * at boot time.
6887 */
6889 static LIST_HEAD(all_detected_devices);
6890 struct detected_devices_node {
6891 struct list_head list;
6895 void md_autodetect_dev(dev_t dev)
6897 struct detected_devices_node *node_detected_dev;
6899 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
6900 if (node_detected_dev) {
6901 node_detected_dev->dev = dev;
6902 list_add_tail(&node_detected_dev->list, &all_detected_devices);
6904 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
6905 ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
6910 static void autostart_arrays(int part)
6913 struct detected_devices_node *node_detected_dev;
6915 int i_scanned, i_passed;
6920 printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
6922 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
6924 node_detected_dev = list_entry(all_detected_devices.next,
6925 struct detected_devices_node, list);
6926 list_del(&node_detected_dev->list);
6927 dev = node_detected_dev->dev;
6928 kfree(node_detected_dev);
6929 rdev = md_import_device(dev,0, 90);
6933 if (test_bit(Faulty, &rdev->flags)) {
6937 set_bit(AutoDetected, &rdev->flags);
6938 list_add(&rdev->same_set, &pending_raid_disks);
6942 printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
6943 i_scanned, i_passed);
6945 autorun_devices(part);
6948 #endif /* !MODULE */
6950 static __exit void md_exit(void)
6953 struct list_head *tmp;
6955 blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
6956 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
6958 unregister_blkdev(MD_MAJOR,"md");
6959 unregister_blkdev(mdp_major, "mdp");
6960 unregister_reboot_notifier(&md_notifier);
6961 unregister_sysctl_table(raid_table_header);
6962 remove_proc_entry("mdstat", NULL);
6963 for_each_mddev(mddev, tmp) {
6964 export_array(mddev);
6965 mddev->hold_active = 0;
6969 subsys_initcall(md_init);
6970 module_exit(md_exit)
6972 static int get_ro(char *buffer, struct kernel_param *kp)
6974 return sprintf(buffer, "%d", start_readonly);
6976 static int set_ro(const char *val, struct kernel_param *kp)
6978 char *e;
6979 int num = simple_strtoul(val, &e, 10);
6980 if (*val && (*e == '\0' || *e == '\n')) {
6981 start_readonly = num;
6982 return 0;
6983 }
6984 return -EINVAL;
6987 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
6988 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
6990 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
6992 EXPORT_SYMBOL(register_md_personality);
6993 EXPORT_SYMBOL(unregister_md_personality);
6994 EXPORT_SYMBOL(md_error);
6995 EXPORT_SYMBOL(md_done_sync);
6996 EXPORT_SYMBOL(md_write_start);
6997 EXPORT_SYMBOL(md_write_end);
6998 EXPORT_SYMBOL(md_register_thread);
6999 EXPORT_SYMBOL(md_unregister_thread);
7000 EXPORT_SYMBOL(md_wakeup_thread);
7001 EXPORT_SYMBOL(md_check_recovery);
7002 MODULE_LICENSE("GPL");
7004 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);