Lines Matching refs:mddev

92 static int remove_and_add_spares(struct mddev *mddev,
94 static void mddev_detach(struct mddev *mddev);
119 static inline int speed_min(struct mddev *mddev) in speed_min() argument
121 return mddev->sync_speed_min ? in speed_min()
122 mddev->sync_speed_min : sysctl_speed_limit_min; in speed_min()
125 static inline int speed_max(struct mddev *mddev) in speed_max() argument
127 return mddev->sync_speed_max ? in speed_max()
128 mddev->sync_speed_max : sysctl_speed_limit_max; in speed_max()
140 static void rdevs_uninit_serial(struct mddev *mddev) in rdevs_uninit_serial() argument
144 rdev_for_each(rdev, mddev) in rdevs_uninit_serial()
176 static int rdevs_init_serial(struct mddev *mddev) in rdevs_init_serial() argument
181 rdev_for_each(rdev, mddev) { in rdevs_init_serial()
188 if (ret && !mddev->serial_info_pool) in rdevs_init_serial()
189 rdevs_uninit_serial(mddev); in rdevs_init_serial()
201 return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 && in rdev_need_serial()
211 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, in mddev_create_serial_pool() argument
221 mddev_suspend(mddev); in mddev_create_serial_pool()
224 ret = rdevs_init_serial(mddev); in mddev_create_serial_pool()
230 if (mddev->serial_info_pool == NULL) { in mddev_create_serial_pool()
235 mddev->serial_info_pool = in mddev_create_serial_pool()
238 if (!mddev->serial_info_pool) { in mddev_create_serial_pool()
239 rdevs_uninit_serial(mddev); in mddev_create_serial_pool()
246 mddev_resume(mddev); in mddev_create_serial_pool()
255 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, in mddev_destroy_serial_pool() argument
261 if (mddev->serial_info_pool) { in mddev_destroy_serial_pool()
266 mddev_suspend(mddev); in mddev_destroy_serial_pool()
267 rdev_for_each(temp, mddev) { in mddev_destroy_serial_pool()
269 if (!mddev->serialize_policy || in mddev_destroy_serial_pool()
285 mempool_destroy(mddev->serial_info_pool); in mddev_destroy_serial_pool()
286 mddev->serial_info_pool = NULL; in mddev_destroy_serial_pool()
289 mddev_resume(mddev); in mddev_destroy_serial_pool()
378 static bool is_suspended(struct mddev *mddev, struct bio *bio) in is_suspended() argument
380 if (mddev->suspended) in is_suspended()
384 if (mddev->suspend_lo >= mddev->suspend_hi) in is_suspended()
386 if (bio->bi_iter.bi_sector >= mddev->suspend_hi) in is_suspended()
388 if (bio_end_sector(bio) < mddev->suspend_lo) in is_suspended()
393 void md_handle_request(struct mddev *mddev, struct bio *bio) in md_handle_request() argument
397 if (is_suspended(mddev, bio)) { in md_handle_request()
406 prepare_to_wait(&mddev->sb_wait, &__wait, in md_handle_request()
408 if (!is_suspended(mddev, bio)) in md_handle_request()
414 finish_wait(&mddev->sb_wait, &__wait); in md_handle_request()
416 atomic_inc(&mddev->active_io); in md_handle_request()
419 if (!mddev->pers->make_request(mddev, bio)) { in md_handle_request()
420 atomic_dec(&mddev->active_io); in md_handle_request()
421 wake_up(&mddev->sb_wait); in md_handle_request()
425 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) in md_handle_request()
426 wake_up(&mddev->sb_wait); in md_handle_request()
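The md_handle_request() fragments above show the suspend gate: a bio aimed at a suspended range is parked on mddev->sb_wait, and every accepted bio is counted in mddev->active_io so that mddev_suspend() can wait for the array to drain. A hedged reconstruction of that wait loop; the for(;;) structure, the schedule() call and the TASK_UNINTERRUPTIBLE state are assumptions based on the usual prepare_to_wait() idiom, not part of the listing:

	if (is_suspended(mddev, bio)) {
		DEFINE_WAIT(__wait);

		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!is_suspended(mddev, bio))
				break;
			schedule();			/* assumed: sleep until sb_wait is woken */
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);			/* count the now in-flight bio */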
433 struct mddev *mddev = bio->bi_bdev->bd_disk->private_data; in md_submit_bio() local
435 if (mddev == NULL || mddev->pers == NULL) { in md_submit_bio()
440 if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { in md_submit_bio()
449 if (mddev->ro == 1 && unlikely(rw == WRITE)) { in md_submit_bio()
459 md_handle_request(mddev, bio); in md_submit_bio()
468 void mddev_suspend(struct mddev *mddev) in mddev_suspend() argument
470 WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk); in mddev_suspend()
471 lockdep_assert_held(&mddev->reconfig_mutex); in mddev_suspend()
472 if (mddev->suspended++) in mddev_suspend()
475 wake_up(&mddev->sb_wait); in mddev_suspend()
476 set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags); in mddev_suspend()
478 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); in mddev_suspend()
479 mddev->pers->quiesce(mddev, 1); in mddev_suspend()
480 clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags); in mddev_suspend()
481 wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags)); in mddev_suspend()
483 del_timer_sync(&mddev->safemode_timer); in mddev_suspend()
485 mddev->noio_flag = memalloc_noio_save(); in mddev_suspend()
489 void mddev_resume(struct mddev *mddev) in mddev_resume() argument
492 memalloc_noio_restore(mddev->noio_flag); in mddev_resume()
493 lockdep_assert_held(&mddev->reconfig_mutex); in mddev_resume()
494 if (--mddev->suspended) in mddev_resume()
496 wake_up(&mddev->sb_wait); in mddev_resume()
497 mddev->pers->quiesce(mddev, 0); in mddev_resume()
499 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in mddev_resume()
500 md_wakeup_thread(mddev->thread); in mddev_resume()
501 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ in mddev_resume()
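mddev_suspend() and mddev_resume() above form a nesting pair (mddev->suspended++ on entry, --mddev->suspended on exit) and both assert that reconfig_mutex is held. A minimal caller sketch, assuming the mddev_lock()/mddev_unlock() wrappers that appear elsewhere in this listing; example_reconfigure() and its body are placeholders, not kernel functions:

	static int example_reconfigure(struct mddev *mddev)
	{
		int err = mddev_lock(mddev);		/* take reconfig_mutex */

		if (err)
			return err;

		mddev_suspend(mddev);			/* drain active_io, quiesce the personality */
		/* ... apply the configuration change ... */
		mddev_resume(mddev);			/* unquiesce, flag MD_RECOVERY_NEEDED, wake threads */

		mddev_unlock(mddev);
		return 0;
	}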
512 struct mddev *mddev = rdev->mddev; in md_end_flush() local
516 rdev_dec_pending(rdev, mddev); in md_end_flush()
518 if (atomic_dec_and_test(&mddev->flush_pending)) { in md_end_flush()
520 queue_work(md_wq, &mddev->flush_work); in md_end_flush()
528 struct mddev *mddev = container_of(ws, struct mddev, flush_work); in submit_flushes() local
531 mddev->start_flush = ktime_get_boottime(); in submit_flushes()
532 INIT_WORK(&mddev->flush_work, md_submit_flush_data); in submit_flushes()
533 atomic_set(&mddev->flush_pending, 1); in submit_flushes()
535 rdev_for_each_rcu(rdev, mddev) in submit_flushes()
548 GFP_NOIO, &mddev->bio_set); in submit_flushes()
551 atomic_inc(&mddev->flush_pending); in submit_flushes()
554 rdev_dec_pending(rdev, mddev); in submit_flushes()
557 if (atomic_dec_and_test(&mddev->flush_pending)) in submit_flushes()
558 queue_work(md_wq, &mddev->flush_work); in submit_flushes()
563 struct mddev *mddev = container_of(ws, struct mddev, flush_work); in md_submit_flush_data() local
564 struct bio *bio = mddev->flush_bio; in md_submit_flush_data()
572 spin_lock_irq(&mddev->lock); in md_submit_flush_data()
573 mddev->prev_flush_start = mddev->start_flush; in md_submit_flush_data()
574 mddev->flush_bio = NULL; in md_submit_flush_data()
575 spin_unlock_irq(&mddev->lock); in md_submit_flush_data()
576 wake_up(&mddev->sb_wait); in md_submit_flush_data()
583 md_handle_request(mddev, bio); in md_submit_flush_data()
593 bool md_flush_request(struct mddev *mddev, struct bio *bio) in md_flush_request() argument
596 spin_lock_irq(&mddev->lock); in md_flush_request()
600 wait_event_lock_irq(mddev->sb_wait, in md_flush_request()
601 !mddev->flush_bio || in md_flush_request()
602 ktime_before(req_start, mddev->prev_flush_start), in md_flush_request()
603 mddev->lock); in md_flush_request()
605 if (ktime_after(req_start, mddev->prev_flush_start)) { in md_flush_request()
606 WARN_ON(mddev->flush_bio); in md_flush_request()
607 mddev->flush_bio = bio; in md_flush_request()
610 spin_unlock_irq(&mddev->lock); in md_flush_request()
613 INIT_WORK(&mddev->flush_work, submit_flushes); in md_flush_request()
614 queue_work(md_wq, &mddev->flush_work); in md_flush_request()
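md_flush_request() above coalesces PREFLUSH requests: only one mddev->flush_bio is in flight at a time, a caller whose request started before prev_flush_start piggy-backs on the flush that already ran, and a newer request installs its bio and queues submit_flushes(). Personalities call it at the top of their make_request hook; a hedged sketch of that call site, where example_make_request() is a hypothetical personality hook:

	static bool example_make_request(struct mddev *mddev, struct bio *bio)
	{
		/* true means the md core took over (or already satisfied) the flush */
		if (unlikely(bio->bi_opf & REQ_PREFLUSH) &&
		    md_flush_request(mddev, bio))
			return true;

		/* ... normal read/write handling for this personality ... */
		return true;
	}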
629 static inline struct mddev *mddev_get(struct mddev *mddev) in mddev_get() argument
633 if (test_bit(MD_DELETED, &mddev->flags)) in mddev_get()
635 atomic_inc(&mddev->active); in mddev_get()
636 return mddev; in mddev_get()
641 void mddev_put(struct mddev *mddev) in mddev_put() argument
643 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) in mddev_put()
645 if (!mddev->raid_disks && list_empty(&mddev->disks) && in mddev_put()
646 mddev->ctime == 0 && !mddev->hold_active) { in mddev_put()
649 set_bit(MD_DELETED, &mddev->flags); in mddev_put()
656 INIT_WORK(&mddev->del_work, mddev_delayed_delete); in mddev_put()
657 queue_work(md_misc_wq, &mddev->del_work); in mddev_put()
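mddev_get() and mddev_put() above manage array lifetime: mddev_get() refuses to pin an array once MD_DELETED is set, and the final mddev_put() of an array with no raid_disks, an empty disk list, a ctime of zero and no hold_active marks it MD_DELETED and queues mddev_delayed_delete() on md_misc_wq. A sketch of the lookup-and-pin pattern; it assumes md.c-internal context, since mddev_find_locked() (listed just below) is a static helper:

	spin_lock(&all_mddevs_lock);
	mddev = mddev_find_locked(unit);	/* find the array for this dev_t */
	if (mddev)
		mddev = mddev_get(mddev);	/* NULL if MD_DELETED was already set */
	spin_unlock(&all_mddevs_lock);

	if (mddev) {
		/* ... use the pinned array ... */
		mddev_put(mddev);		/* last ref may schedule delayed deletion */
	}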
664 void mddev_init(struct mddev *mddev) in mddev_init() argument
666 mutex_init(&mddev->open_mutex); in mddev_init()
667 mutex_init(&mddev->reconfig_mutex); in mddev_init()
668 mutex_init(&mddev->bitmap_info.mutex); in mddev_init()
669 INIT_LIST_HEAD(&mddev->disks); in mddev_init()
670 INIT_LIST_HEAD(&mddev->all_mddevs); in mddev_init()
671 timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0); in mddev_init()
672 atomic_set(&mddev->active, 1); in mddev_init()
673 atomic_set(&mddev->openers, 0); in mddev_init()
674 atomic_set(&mddev->active_io, 0); in mddev_init()
675 spin_lock_init(&mddev->lock); in mddev_init()
676 atomic_set(&mddev->flush_pending, 0); in mddev_init()
677 init_waitqueue_head(&mddev->sb_wait); in mddev_init()
678 init_waitqueue_head(&mddev->recovery_wait); in mddev_init()
679 mddev->reshape_position = MaxSector; in mddev_init()
680 mddev->reshape_backwards = 0; in mddev_init()
681 mddev->last_sync_action = "none"; in mddev_init()
682 mddev->resync_min = 0; in mddev_init()
683 mddev->resync_max = MaxSector; in mddev_init()
684 mddev->level = LEVEL_NONE; in mddev_init()
688 static struct mddev *mddev_find_locked(dev_t unit) in mddev_find_locked()
690 struct mddev *mddev; in mddev_find_locked() local
692 list_for_each_entry(mddev, &all_mddevs, all_mddevs) in mddev_find_locked()
693 if (mddev->unit == unit) in mddev_find_locked()
694 return mddev; in mddev_find_locked()
720 static struct mddev *mddev_alloc(dev_t unit) in mddev_alloc()
722 struct mddev *new; in mddev_alloc()
762 static void mddev_free(struct mddev *mddev) in mddev_free() argument
765 list_del(&mddev->all_mddevs); in mddev_free()
768 kfree(mddev); in mddev_free()
773 void mddev_unlock(struct mddev *mddev) in mddev_unlock() argument
775 if (mddev->to_remove) { in mddev_unlock()
788 const struct attribute_group *to_remove = mddev->to_remove; in mddev_unlock()
789 mddev->to_remove = NULL; in mddev_unlock()
790 mddev->sysfs_active = 1; in mddev_unlock()
791 mutex_unlock(&mddev->reconfig_mutex); in mddev_unlock()
793 if (mddev->kobj.sd) { in mddev_unlock()
795 sysfs_remove_group(&mddev->kobj, to_remove); in mddev_unlock()
796 if (mddev->pers == NULL || in mddev_unlock()
797 mddev->pers->sync_request == NULL) { in mddev_unlock()
798 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); in mddev_unlock()
799 if (mddev->sysfs_action) in mddev_unlock()
800 sysfs_put(mddev->sysfs_action); in mddev_unlock()
801 if (mddev->sysfs_completed) in mddev_unlock()
802 sysfs_put(mddev->sysfs_completed); in mddev_unlock()
803 if (mddev->sysfs_degraded) in mddev_unlock()
804 sysfs_put(mddev->sysfs_degraded); in mddev_unlock()
805 mddev->sysfs_action = NULL; in mddev_unlock()
806 mddev->sysfs_completed = NULL; in mddev_unlock()
807 mddev->sysfs_degraded = NULL; in mddev_unlock()
810 mddev->sysfs_active = 0; in mddev_unlock()
812 mutex_unlock(&mddev->reconfig_mutex); in mddev_unlock()
818 md_wakeup_thread(mddev->thread); in mddev_unlock()
819 wake_up(&mddev->sb_wait); in mddev_unlock()
824 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr) in md_find_rdev_nr_rcu() argument
828 rdev_for_each_rcu(rdev, mddev) in md_find_rdev_nr_rcu()
836 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev) in find_rdev() argument
840 rdev_for_each(rdev, mddev) in find_rdev()
847 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev) in md_find_rdev_rcu() argument
851 rdev_for_each_rcu(rdev, mddev) in md_find_rdev_rcu()
905 struct mddev *mddev = rdev->mddev; in super_written() local
910 md_error(mddev, rdev); in super_written()
913 set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); in super_written()
921 rdev_dec_pending(rdev, mddev); in super_written()
923 if (atomic_dec_and_test(&mddev->pending_writes)) in super_written()
924 wake_up(&mddev->sb_wait); in super_written()
927 void md_super_write(struct mddev *mddev, struct md_rdev *rdev, in md_super_write() argument
947 GFP_NOIO, &mddev->sync_set); in md_super_write()
956 if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && in md_super_write()
961 atomic_inc(&mddev->pending_writes); in md_super_write()
965 int md_super_wait(struct mddev *mddev) in md_super_wait() argument
968 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); in md_super_wait()
969 if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags)) in md_super_wait()
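md_super_write() above queues an asynchronous superblock write and bumps mddev->pending_writes; md_super_wait() sleeps on sb_wait until that count reaches zero and reports failure if MD_SB_NEED_REWRITE was set by super_written(). The rdev_size_change fragments later in this listing pair the two in a retry loop; the same pattern in isolation, shown here only as an illustration:

	do {
		md_super_write(rdev->mddev, rdev, rdev->sb_start,
			       rdev->sb_size, rdev->sb_page);
	} while (md_super_wait(rdev->mddev) < 0);	/* rewrite if a device needs it */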
987 else if (rdev->mddev->reshape_position != MaxSector && in sync_page_io()
988 (rdev->mddev->reshape_backwards == in sync_page_io()
989 (sector >= rdev->mddev->reshape_position))) in sync_page_io()
1126 int (*validate_super)(struct mddev *mddev,
1128 void (*sync_super)(struct mddev *mddev,
1144 int md_check_no_bitmap(struct mddev *mddev) in md_check_no_bitmap() argument
1146 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) in md_check_no_bitmap()
1149 mdname(mddev), mddev->pers->name); in md_check_no_bitmap()
1265 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) in super_90_validate() argument
1277 if (mddev->raid_disks == 0) { in super_90_validate()
1278 mddev->major_version = 0; in super_90_validate()
1279 mddev->minor_version = sb->minor_version; in super_90_validate()
1280 mddev->patch_version = sb->patch_version; in super_90_validate()
1281 mddev->external = 0; in super_90_validate()
1282 mddev->chunk_sectors = sb->chunk_size >> 9; in super_90_validate()
1283 mddev->ctime = sb->ctime; in super_90_validate()
1284 mddev->utime = sb->utime; in super_90_validate()
1285 mddev->level = sb->level; in super_90_validate()
1286 mddev->clevel[0] = 0; in super_90_validate()
1287 mddev->layout = sb->layout; in super_90_validate()
1288 mddev->raid_disks = sb->raid_disks; in super_90_validate()
1289 mddev->dev_sectors = ((sector_t)sb->size) * 2; in super_90_validate()
1290 mddev->events = ev1; in super_90_validate()
1291 mddev->bitmap_info.offset = 0; in super_90_validate()
1292 mddev->bitmap_info.space = 0; in super_90_validate()
1294 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; in super_90_validate()
1295 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); in super_90_validate()
1296 mddev->reshape_backwards = 0; in super_90_validate()
1298 if (mddev->minor_version >= 91) { in super_90_validate()
1299 mddev->reshape_position = sb->reshape_position; in super_90_validate()
1300 mddev->delta_disks = sb->delta_disks; in super_90_validate()
1301 mddev->new_level = sb->new_level; in super_90_validate()
1302 mddev->new_layout = sb->new_layout; in super_90_validate()
1303 mddev->new_chunk_sectors = sb->new_chunk >> 9; in super_90_validate()
1304 if (mddev->delta_disks < 0) in super_90_validate()
1305 mddev->reshape_backwards = 1; in super_90_validate()
1307 mddev->reshape_position = MaxSector; in super_90_validate()
1308 mddev->delta_disks = 0; in super_90_validate()
1309 mddev->new_level = mddev->level; in super_90_validate()
1310 mddev->new_layout = mddev->layout; in super_90_validate()
1311 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_90_validate()
1313 if (mddev->level == 0) in super_90_validate()
1314 mddev->layout = -1; in super_90_validate()
1317 mddev->recovery_cp = MaxSector; in super_90_validate()
1321 mddev->recovery_cp = sb->recovery_cp; in super_90_validate()
1323 mddev->recovery_cp = 0; in super_90_validate()
1326 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); in super_90_validate()
1327 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); in super_90_validate()
1328 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); in super_90_validate()
1329 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); in super_90_validate()
1331 mddev->max_disks = MD_SB_DISKS; in super_90_validate()
1334 mddev->bitmap_info.file == NULL) { in super_90_validate()
1335 mddev->bitmap_info.offset = in super_90_validate()
1336 mddev->bitmap_info.default_offset; in super_90_validate()
1337 mddev->bitmap_info.space = in super_90_validate()
1338 mddev->bitmap_info.default_space; in super_90_validate()
1341 } else if (mddev->pers == NULL) { in super_90_validate()
1347 if (ev1 < mddev->events) in super_90_validate()
1349 } else if (mddev->bitmap) { in super_90_validate()
1353 if (ev1 < mddev->bitmap->events_cleared) in super_90_validate()
1355 if (ev1 < mddev->events) in super_90_validate()
1358 if (ev1 < mddev->events) in super_90_validate()
1363 if (mddev->level != LEVEL_MULTIPATH) { in super_90_validate()
1377 if (mddev->minor_version >= 91) { in super_90_validate()
1394 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) in super_90_sync() argument
1398 int next_spare = mddev->raid_disks; in super_90_sync()
1420 sb->major_version = mddev->major_version; in super_90_sync()
1421 sb->patch_version = mddev->patch_version; in super_90_sync()
1423 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); in super_90_sync()
1424 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); in super_90_sync()
1425 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); in super_90_sync()
1426 memcpy(&sb->set_uuid3, mddev->uuid+12,4); in super_90_sync()
1428 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); in super_90_sync()
1429 sb->level = mddev->level; in super_90_sync()
1430 sb->size = mddev->dev_sectors / 2; in super_90_sync()
1431 sb->raid_disks = mddev->raid_disks; in super_90_sync()
1432 sb->md_minor = mddev->md_minor; in super_90_sync()
1434 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); in super_90_sync()
1436 sb->events_hi = (mddev->events>>32); in super_90_sync()
1437 sb->events_lo = (u32)mddev->events; in super_90_sync()
1439 if (mddev->reshape_position == MaxSector) in super_90_sync()
1443 sb->reshape_position = mddev->reshape_position; in super_90_sync()
1444 sb->new_level = mddev->new_level; in super_90_sync()
1445 sb->delta_disks = mddev->delta_disks; in super_90_sync()
1446 sb->new_layout = mddev->new_layout; in super_90_sync()
1447 sb->new_chunk = mddev->new_chunk_sectors << 9; in super_90_sync()
1449 mddev->minor_version = sb->minor_version; in super_90_sync()
1450 if (mddev->in_sync) in super_90_sync()
1452 sb->recovery_cp = mddev->recovery_cp; in super_90_sync()
1453 sb->cp_events_hi = (mddev->events>>32); in super_90_sync()
1454 sb->cp_events_lo = (u32)mddev->events; in super_90_sync()
1455 if (mddev->recovery_cp == MaxSector) in super_90_sync()
1460 sb->layout = mddev->layout; in super_90_sync()
1461 sb->chunk_size = mddev->chunk_sectors << 9; in super_90_sync()
1463 if (mddev->bitmap && mddev->bitmap_info.file == NULL) in super_90_sync()
1467 rdev_for_each(rdev2, mddev) { in super_90_sync()
1515 for (i=0 ; i < mddev->raid_disks ; i++) { in super_90_sync()
1541 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_90_rdev_size_change()
1543 if (rdev->mddev->bitmap_info.offset) in super_90_rdev_size_change()
1551 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) in super_90_rdev_size_change()
1554 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_90_rdev_size_change()
1556 } while (md_super_wait(rdev->mddev) < 0); in super_90_rdev_size_change()
1777 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) in super_1_validate() argument
1788 if (mddev->raid_disks == 0) { in super_1_validate()
1789 mddev->major_version = 1; in super_1_validate()
1790 mddev->patch_version = 0; in super_1_validate()
1791 mddev->external = 0; in super_1_validate()
1792 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); in super_1_validate()
1793 mddev->ctime = le64_to_cpu(sb->ctime); in super_1_validate()
1794 mddev->utime = le64_to_cpu(sb->utime); in super_1_validate()
1795 mddev->level = le32_to_cpu(sb->level); in super_1_validate()
1796 mddev->clevel[0] = 0; in super_1_validate()
1797 mddev->layout = le32_to_cpu(sb->layout); in super_1_validate()
1798 mddev->raid_disks = le32_to_cpu(sb->raid_disks); in super_1_validate()
1799 mddev->dev_sectors = le64_to_cpu(sb->size); in super_1_validate()
1800 mddev->events = ev1; in super_1_validate()
1801 mddev->bitmap_info.offset = 0; in super_1_validate()
1802 mddev->bitmap_info.space = 0; in super_1_validate()
1806 mddev->bitmap_info.default_offset = 1024 >> 9; in super_1_validate()
1807 mddev->bitmap_info.default_space = (4096-1024) >> 9; in super_1_validate()
1808 mddev->reshape_backwards = 0; in super_1_validate()
1810 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); in super_1_validate()
1811 memcpy(mddev->uuid, sb->set_uuid, 16); in super_1_validate()
1813 mddev->max_disks = (4096-256)/2; in super_1_validate()
1816 mddev->bitmap_info.file == NULL) { in super_1_validate()
1817 mddev->bitmap_info.offset = in super_1_validate()
1824 if (mddev->minor_version > 0) in super_1_validate()
1825 mddev->bitmap_info.space = 0; in super_1_validate()
1826 else if (mddev->bitmap_info.offset > 0) in super_1_validate()
1827 mddev->bitmap_info.space = in super_1_validate()
1828 8 - mddev->bitmap_info.offset; in super_1_validate()
1830 mddev->bitmap_info.space = in super_1_validate()
1831 -mddev->bitmap_info.offset; in super_1_validate()
1835 mddev->reshape_position = le64_to_cpu(sb->reshape_position); in super_1_validate()
1836 mddev->delta_disks = le32_to_cpu(sb->delta_disks); in super_1_validate()
1837 mddev->new_level = le32_to_cpu(sb->new_level); in super_1_validate()
1838 mddev->new_layout = le32_to_cpu(sb->new_layout); in super_1_validate()
1839 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); in super_1_validate()
1840 if (mddev->delta_disks < 0 || in super_1_validate()
1841 (mddev->delta_disks == 0 && in super_1_validate()
1844 mddev->reshape_backwards = 1; in super_1_validate()
1846 mddev->reshape_position = MaxSector; in super_1_validate()
1847 mddev->delta_disks = 0; in super_1_validate()
1848 mddev->new_level = mddev->level; in super_1_validate()
1849 mddev->new_layout = mddev->layout; in super_1_validate()
1850 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_1_validate()
1853 if (mddev->level == 0 && in super_1_validate()
1855 mddev->layout = -1; in super_1_validate()
1858 set_bit(MD_HAS_JOURNAL, &mddev->flags); in super_1_validate()
1869 set_bit(MD_HAS_PPL, &mddev->flags); in super_1_validate()
1871 } else if (mddev->pers == NULL) { in super_1_validate()
1879 if (ev1 < mddev->events) in super_1_validate()
1881 } else if (mddev->bitmap) { in super_1_validate()
1885 if (ev1 < mddev->bitmap->events_cleared) in super_1_validate()
1887 if (ev1 < mddev->events) in super_1_validate()
1890 if (ev1 < mddev->events) in super_1_validate()
1894 if (mddev->level != LEVEL_MULTIPATH) { in super_1_validate()
1932 &mddev->recovery)) in super_1_validate()
1950 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) in super_1_sync() argument
1964 sb->utime = cpu_to_le64((__u64)mddev->utime); in super_1_sync()
1965 sb->events = cpu_to_le64(mddev->events); in super_1_sync()
1966 if (mddev->in_sync) in super_1_sync()
1967 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); in super_1_sync()
1968 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) in super_1_sync()
1975 sb->raid_disks = cpu_to_le32(mddev->raid_disks); in super_1_sync()
1976 sb->size = cpu_to_le64(mddev->dev_sectors); in super_1_sync()
1977 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); in super_1_sync()
1978 sb->level = cpu_to_le32(mddev->level); in super_1_sync()
1979 sb->layout = cpu_to_le32(mddev->layout); in super_1_sync()
1992 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { in super_1_sync()
1993 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); in super_1_sync()
2003 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) in super_1_sync()
2014 if (mddev->reshape_position != MaxSector) { in super_1_sync()
2016 sb->reshape_position = cpu_to_le64(mddev->reshape_position); in super_1_sync()
2017 sb->new_layout = cpu_to_le32(mddev->new_layout); in super_1_sync()
2018 sb->delta_disks = cpu_to_le32(mddev->delta_disks); in super_1_sync()
2019 sb->new_level = cpu_to_le32(mddev->new_level); in super_1_sync()
2020 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); in super_1_sync()
2021 if (mddev->delta_disks == 0 && in super_1_sync()
2022 mddev->reshape_backwards) in super_1_sync()
2033 if (mddev_is_clustered(mddev)) in super_1_sync()
2040 md_error(mddev, rdev); in super_1_sync()
2071 rdev_for_each(rdev2, mddev) in super_1_sync()
2088 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) in super_1_sync()
2091 if (test_bit(MD_HAS_PPL, &mddev->flags)) { in super_1_sync()
2092 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) in super_1_sync()
2101 rdev_for_each(rdev2, mddev) { in super_1_sync()
2141 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_1_rdev_size_change()
2150 } else if (rdev->mddev->bitmap_info.offset) { in super_1_rdev_size_change()
2178 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_1_rdev_size_change()
2180 } while (md_super_wait(rdev->mddev) < 0); in super_1_rdev_size_change()
2196 if (rdev->mddev->minor_version == 0) in super_1_allow_new_offset()
2207 bitmap = rdev->mddev->bitmap; in super_1_allow_new_offset()
2208 if (bitmap && !rdev->mddev->bitmap_info.file && in super_1_allow_new_offset()
2209 rdev->sb_start + rdev->mddev->bitmap_info.offset + in super_1_allow_new_offset()
2239 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) in sync_super() argument
2241 if (mddev->sync_super) { in sync_super()
2242 mddev->sync_super(mddev, rdev); in sync_super()
2246 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); in sync_super()
2248 super_types[mddev->major_version].sync_super(mddev, rdev); in sync_super()
2251 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) in match_mddev_units()
2285 int md_integrity_register(struct mddev *mddev) in md_integrity_register() argument
2289 if (list_empty(&mddev->disks)) in md_integrity_register()
2291 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) in md_integrity_register()
2293 rdev_for_each(rdev, mddev) { in md_integrity_register()
2315 blk_integrity_register(mddev->gendisk, in md_integrity_register()
2318 pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); in md_integrity_register()
2319 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) || in md_integrity_register()
2320 (mddev->level != 1 && mddev->level != 10 && in md_integrity_register()
2321 bioset_integrity_create(&mddev->io_acct_set, BIO_POOL_SIZE))) { in md_integrity_register()
2329 mdname(mddev)); in md_integrity_register()
2340 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_integrity_add_rdev() argument
2344 if (!mddev->gendisk) in md_integrity_add_rdev()
2347 bi_mddev = blk_get_integrity(mddev->gendisk); in md_integrity_add_rdev()
2352 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { in md_integrity_add_rdev()
2354 mdname(mddev), rdev->bdev); in md_integrity_add_rdev()
2368 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) in bind_rdev_to_array() argument
2374 if (find_rdev(mddev, rdev->bdev->bd_dev)) in bind_rdev_to_array()
2377 if (rdev_read_only(rdev) && mddev->pers) in bind_rdev_to_array()
2383 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { in bind_rdev_to_array()
2384 if (mddev->pers) { in bind_rdev_to_array()
2389 if (mddev->level > 0) in bind_rdev_to_array()
2392 mddev->dev_sectors = rdev->sectors; in bind_rdev_to_array()
2402 if (mddev->pers) in bind_rdev_to_array()
2403 choice = mddev->raid_disks; in bind_rdev_to_array()
2404 while (md_find_rdev_nr_rcu(mddev, choice)) in bind_rdev_to_array()
2408 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { in bind_rdev_to_array()
2415 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { in bind_rdev_to_array()
2417 mdname(mddev), mddev->max_disks); in bind_rdev_to_array()
2423 rdev->mddev = mddev; in bind_rdev_to_array()
2426 if (mddev->raid_disks) in bind_rdev_to_array()
2427 mddev_create_serial_pool(mddev, rdev, false); in bind_rdev_to_array()
2429 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) in bind_rdev_to_array()
2440 list_add_rcu(&rdev->same_set, &mddev->disks); in bind_rdev_to_array()
2441 bd_link_disk_holder(rdev->bdev, mddev->gendisk); in bind_rdev_to_array()
2444 mddev->recovery_disabled++; in bind_rdev_to_array()
2450 b, mdname(mddev)); in bind_rdev_to_array()
2463 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); in unbind_rdev_from_array()
2466 mddev_destroy_serial_pool(rdev->mddev, rdev, false); in unbind_rdev_from_array()
2467 rdev->mddev = NULL; in unbind_rdev_from_array()
2535 static void export_array(struct mddev *mddev) in export_array() argument
2539 while (!list_empty(&mddev->disks)) { in export_array()
2540 rdev = list_first_entry(&mddev->disks, struct md_rdev, in export_array()
2544 mddev->raid_disks = 0; in export_array()
2545 mddev->major_version = 0; in export_array()
2548 static bool set_in_sync(struct mddev *mddev) in set_in_sync() argument
2550 lockdep_assert_held(&mddev->lock); in set_in_sync()
2551 if (!mddev->in_sync) { in set_in_sync()
2552 mddev->sync_checkers++; in set_in_sync()
2553 spin_unlock(&mddev->lock); in set_in_sync()
2554 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); in set_in_sync()
2555 spin_lock(&mddev->lock); in set_in_sync()
2556 if (!mddev->in_sync && in set_in_sync()
2557 percpu_ref_is_zero(&mddev->writes_pending)) { in set_in_sync()
2558 mddev->in_sync = 1; in set_in_sync()
2564 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in set_in_sync()
2565 sysfs_notify_dirent_safe(mddev->sysfs_state); in set_in_sync()
2567 if (--mddev->sync_checkers == 0) in set_in_sync()
2568 percpu_ref_switch_to_percpu(&mddev->writes_pending); in set_in_sync()
2570 if (mddev->safemode == 1) in set_in_sync()
2571 mddev->safemode = 0; in set_in_sync()
2572 return mddev->in_sync; in set_in_sync()
2575 static void sync_sbs(struct mddev *mddev, int nospares) in sync_sbs() argument
2584 rdev_for_each(rdev, mddev) { in sync_sbs()
2585 if (rdev->sb_events == mddev->events || in sync_sbs()
2588 rdev->sb_events+1 == mddev->events)) { in sync_sbs()
2592 sync_super(mddev, rdev); in sync_sbs()
2598 static bool does_sb_need_changing(struct mddev *mddev) in does_sb_need_changing() argument
2605 rdev_for_each(iter, mddev) in does_sb_need_changing()
2617 rdev_for_each(rdev, mddev) { in does_sb_need_changing()
2629 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || in does_sb_need_changing()
2630 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || in does_sb_need_changing()
2631 (mddev->layout != le32_to_cpu(sb->layout)) || in does_sb_need_changing()
2632 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || in does_sb_need_changing()
2633 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) in does_sb_need_changing()
2639 void md_update_sb(struct mddev *mddev, int force_change) in md_update_sb() argument
2647 if (mddev->ro) { in md_update_sb()
2649 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_update_sb()
2654 if (mddev_is_clustered(mddev)) { in md_update_sb()
2655 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) in md_update_sb()
2657 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) in md_update_sb()
2659 ret = md_cluster_ops->metadata_update_start(mddev); in md_update_sb()
2661 if (!does_sb_need_changing(mddev)) { in md_update_sb()
2663 md_cluster_ops->metadata_update_cancel(mddev); in md_update_sb()
2664 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), in md_update_sb()
2677 rdev_for_each(rdev, mddev) { in md_update_sb()
2679 mddev->delta_disks >= 0 && in md_update_sb()
2680 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in md_update_sb()
2681 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) && in md_update_sb()
2682 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_update_sb()
2685 mddev->curr_resync_completed > rdev->recovery_offset) in md_update_sb()
2686 rdev->recovery_offset = mddev->curr_resync_completed; in md_update_sb()
2689 if (!mddev->persistent) { in md_update_sb()
2690 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_update_sb()
2691 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_update_sb()
2692 if (!mddev->external) { in md_update_sb()
2693 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_update_sb()
2694 rdev_for_each(rdev, mddev) { in md_update_sb()
2698 md_error(mddev, rdev); in md_update_sb()
2705 wake_up(&mddev->sb_wait); in md_update_sb()
2709 spin_lock(&mddev->lock); in md_update_sb()
2711 mddev->utime = ktime_get_real_seconds(); in md_update_sb()
2713 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) in md_update_sb()
2715 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) in md_update_sb()
2723 if (mddev->degraded) in md_update_sb()
2735 sync_req = mddev->in_sync; in md_update_sb()
2740 && (mddev->in_sync && mddev->recovery_cp == MaxSector) in md_update_sb()
2741 && mddev->can_decrease_events in md_update_sb()
2742 && mddev->events != 1) { in md_update_sb()
2743 mddev->events--; in md_update_sb()
2744 mddev->can_decrease_events = 0; in md_update_sb()
2747 mddev->events ++; in md_update_sb()
2748 mddev->can_decrease_events = nospares; in md_update_sb()
2756 WARN_ON(mddev->events == 0); in md_update_sb()
2758 rdev_for_each(rdev, mddev) { in md_update_sb()
2765 sync_sbs(mddev, nospares); in md_update_sb()
2766 spin_unlock(&mddev->lock); in md_update_sb()
2769 mdname(mddev), mddev->in_sync); in md_update_sb()
2771 if (mddev->queue) in md_update_sb()
2772 blk_add_trace_msg(mddev->queue, "md md_update_sb"); in md_update_sb()
2774 md_bitmap_update_sb(mddev->bitmap); in md_update_sb()
2775 rdev_for_each(rdev, mddev) { in md_update_sb()
2780 md_super_write(mddev,rdev, in md_update_sb()
2786 rdev->sb_events = mddev->events; in md_update_sb()
2788 md_super_write(mddev, rdev, in md_update_sb()
2799 if (mddev->level == LEVEL_MULTIPATH) in md_update_sb()
2803 if (md_super_wait(mddev) < 0) in md_update_sb()
2807 if (mddev_is_clustered(mddev) && ret == 0) in md_update_sb()
2808 md_cluster_ops->metadata_update_finish(mddev); in md_update_sb()
2810 if (mddev->in_sync != sync_req || in md_update_sb()
2811 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), in md_update_sb()
2815 wake_up(&mddev->sb_wait); in md_update_sb()
2816 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in md_update_sb()
2817 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_update_sb()
2819 rdev_for_each(rdev, mddev) { in md_update_sb()
2833 struct mddev *mddev = rdev->mddev; in add_bound_rdev() local
2837 if (!mddev->pers->hot_remove_disk || add_journal) { in add_bound_rdev()
2842 super_types[mddev->major_version]. in add_bound_rdev()
2843 validate_super(mddev, rdev); in add_bound_rdev()
2845 mddev_suspend(mddev); in add_bound_rdev()
2846 err = mddev->pers->hot_add_disk(mddev, rdev); in add_bound_rdev()
2848 mddev_resume(mddev); in add_bound_rdev()
2856 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in add_bound_rdev()
2857 if (mddev->degraded) in add_bound_rdev()
2858 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in add_bound_rdev()
2859 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in add_bound_rdev()
2861 md_wakeup_thread(mddev->thread); in add_bound_rdev()
2951 struct mddev *mddev = rdev->mddev; in state_store() local
2955 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { in state_store()
2956 md_error(rdev->mddev, rdev); in state_store()
2958 if (test_bit(MD_BROKEN, &rdev->mddev->flags)) in state_store()
2963 if (rdev->mddev->pers) { in state_store()
2965 remove_and_add_spares(rdev->mddev, rdev); in state_store()
2971 if (mddev_is_clustered(mddev)) in state_store()
2972 err = md_cluster_ops->remove_disk(mddev, rdev); in state_store()
2976 if (mddev->pers) { in state_store()
2977 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in state_store()
2978 md_wakeup_thread(mddev->thread); in state_store()
2985 mddev_create_serial_pool(rdev->mddev, rdev, false); in state_store()
2989 mddev_destroy_serial_pool(rdev->mddev, rdev, false); in state_store()
3003 md_error(rdev->mddev, rdev); in state_store()
3008 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
3009 md_wakeup_thread(rdev->mddev->thread); in state_store()
3025 if (rdev->mddev->pers == NULL) { in state_store()
3046 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
3047 md_wakeup_thread(rdev->mddev->thread); in state_store()
3060 if (rdev->mddev->pers) in state_store()
3068 if (rdev->mddev->pers) in state_store()
3075 if (!rdev->mddev->pers) in state_store()
3085 if (!mddev_is_clustered(rdev->mddev) || in state_store()
3092 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { in state_store()
3096 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { in state_store()
3101 md_update_sb(mddev, 1); in state_store()
3156 if (rdev->mddev->pers && slot == -1) { in slot_store()
3167 if (rdev->mddev->pers->hot_remove_disk == NULL) in slot_store()
3170 remove_and_add_spares(rdev->mddev, rdev); in slot_store()
3173 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in slot_store()
3174 md_wakeup_thread(rdev->mddev->thread); in slot_store()
3175 } else if (rdev->mddev->pers) { in slot_store()
3184 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) in slot_store()
3187 if (rdev->mddev->pers->hot_add_disk == NULL) in slot_store()
3190 if (slot >= rdev->mddev->raid_disks && in slot_store()
3191 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
3201 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev); in slot_store()
3208 sysfs_link_rdev(rdev->mddev, rdev); in slot_store()
3211 if (slot >= rdev->mddev->raid_disks && in slot_store()
3212 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
3239 if (rdev->mddev->pers && rdev->raid_disk >= 0) in offset_store()
3241 if (rdev->sectors && rdev->mddev->external) in offset_store()
3263 struct mddev *mddev = rdev->mddev; in new_offset_store() local
3268 if (mddev->sync_thread || in new_offset_store()
3269 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) in new_offset_store()
3277 + mddev->dev_sectors > rdev->sectors) in new_offset_store()
3286 mddev->reshape_backwards) in new_offset_store()
3293 !mddev->reshape_backwards) in new_offset_store()
3296 if (mddev->pers && mddev->persistent && in new_offset_store()
3297 !super_types[mddev->major_version] in new_offset_store()
3302 mddev->reshape_backwards = 1; in new_offset_store()
3304 mddev->reshape_backwards = 0; in new_offset_store()
3329 struct mddev *mddev; in md_rdev_overlaps() local
3333 list_for_each_entry(mddev, &all_mddevs, all_mddevs) { in md_rdev_overlaps()
3334 if (test_bit(MD_DELETED, &mddev->flags)) in md_rdev_overlaps()
3336 rdev_for_each(rdev2, mddev) { in md_rdev_overlaps()
3370 struct mddev *my_mddev = rdev->mddev; in rdev_size_store()
3439 if (rdev->mddev->pers && in recovery_start_store()
3507 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && in ppl_sector_store()
3511 if (rdev->mddev->persistent) { in ppl_sector_store()
3512 if (rdev->mddev->major_version == 0) in ppl_sector_store()
3520 } else if (!rdev->mddev->external) { in ppl_sector_store()
3544 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && in ppl_size_store()
3548 if (rdev->mddev->persistent) { in ppl_size_store()
3549 if (rdev->mddev->major_version == 0) in ppl_size_store()
3553 } else if (!rdev->mddev->external) { in ppl_size_store()
3586 if (!rdev->mddev) in rdev_attr_show()
3598 struct mddev *mddev = rdev->mddev; in rdev_attr_store() local
3604 rv = mddev ? mddev_lock(mddev) : -ENODEV; in rdev_attr_store()
3606 if (rdev->mddev == NULL) in rdev_attr_store()
3610 mddev_unlock(mddev); in rdev_attr_store()
3727 static int analyze_sbs(struct mddev *mddev) in analyze_sbs() argument
3733 rdev_for_each_safe(rdev, tmp, mddev) in analyze_sbs()
3734 switch (super_types[mddev->major_version]. in analyze_sbs()
3735 load_super(rdev, freshest, mddev->minor_version)) { in analyze_sbs()
3753 super_types[mddev->major_version]. in analyze_sbs()
3754 validate_super(mddev, freshest); in analyze_sbs()
3757 rdev_for_each_safe(rdev, tmp, mddev) { in analyze_sbs()
3758 if (mddev->max_disks && in analyze_sbs()
3759 (rdev->desc_nr >= mddev->max_disks || in analyze_sbs()
3760 i > mddev->max_disks)) { in analyze_sbs()
3762 mdname(mddev), rdev->bdev, in analyze_sbs()
3763 mddev->max_disks); in analyze_sbs()
3768 if (super_types[mddev->major_version]. in analyze_sbs()
3769 validate_super(mddev, rdev)) { in analyze_sbs()
3776 if (mddev->level == LEVEL_MULTIPATH) { in analyze_sbs()
3781 (mddev->raid_disks - min(0, mddev->delta_disks)) && in analyze_sbs()
3828 safe_delay_show(struct mddev *mddev, char *page) in safe_delay_show() argument
3830 int msec = (mddev->safemode_delay*1000)/HZ; in safe_delay_show()
3834 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) in safe_delay_store() argument
3838 if (mddev_is_clustered(mddev)) { in safe_delay_store()
3846 mddev->safemode_delay = 0; in safe_delay_store()
3848 unsigned long old_delay = mddev->safemode_delay; in safe_delay_store()
3853 mddev->safemode_delay = new_delay; in safe_delay_store()
3855 mod_timer(&mddev->safemode_timer, jiffies+1); in safe_delay_store()
3863 level_show(struct mddev *mddev, char *page) in level_show() argument
3867 spin_lock(&mddev->lock); in level_show()
3868 p = mddev->pers; in level_show()
3871 else if (mddev->clevel[0]) in level_show()
3872 ret = sprintf(page, "%s\n", mddev->clevel); in level_show()
3873 else if (mddev->level != LEVEL_NONE) in level_show()
3874 ret = sprintf(page, "%d\n", mddev->level); in level_show()
3877 spin_unlock(&mddev->lock); in level_show()
3882 level_store(struct mddev *mddev, const char *buf, size_t len) in level_store() argument
3895 rv = mddev_lock(mddev); in level_store()
3899 if (mddev->pers == NULL) { in level_store()
3900 strncpy(mddev->clevel, buf, slen); in level_store()
3901 if (mddev->clevel[slen-1] == '\n') in level_store()
3903 mddev->clevel[slen] = 0; in level_store()
3904 mddev->level = LEVEL_NONE; in level_store()
3909 if (mddev->ro) in level_store()
3919 if (mddev->sync_thread || in level_store()
3920 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in level_store()
3921 mddev->reshape_position != MaxSector || in level_store()
3922 mddev->sysfs_active) in level_store()
3926 if (!mddev->pers->quiesce) { in level_store()
3928 mdname(mddev), mddev->pers->name); in level_store()
3952 if (pers == mddev->pers) { in level_store()
3961 mdname(mddev), clevel); in level_store()
3966 rdev_for_each(rdev, mddev) in level_store()
3972 priv = pers->takeover(mddev); in level_store()
3974 mddev->new_level = mddev->level; in level_store()
3975 mddev->new_layout = mddev->layout; in level_store()
3976 mddev->new_chunk_sectors = mddev->chunk_sectors; in level_store()
3977 mddev->raid_disks -= mddev->delta_disks; in level_store()
3978 mddev->delta_disks = 0; in level_store()
3979 mddev->reshape_backwards = 0; in level_store()
3982 mdname(mddev), clevel); in level_store()
3988 mddev_suspend(mddev); in level_store()
3989 mddev_detach(mddev); in level_store()
3991 spin_lock(&mddev->lock); in level_store()
3992 oldpers = mddev->pers; in level_store()
3993 oldpriv = mddev->private; in level_store()
3994 mddev->pers = pers; in level_store()
3995 mddev->private = priv; in level_store()
3996 strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); in level_store()
3997 mddev->level = mddev->new_level; in level_store()
3998 mddev->layout = mddev->new_layout; in level_store()
3999 mddev->chunk_sectors = mddev->new_chunk_sectors; in level_store()
4000 mddev->delta_disks = 0; in level_store()
4001 mddev->reshape_backwards = 0; in level_store()
4002 mddev->degraded = 0; in level_store()
4003 spin_unlock(&mddev->lock); in level_store()
4006 mddev->external) { in level_store()
4014 mddev->in_sync = 0; in level_store()
4015 mddev->safemode_delay = 0; in level_store()
4016 mddev->safemode = 0; in level_store()
4019 oldpers->free(mddev, oldpriv); in level_store()
4024 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) in level_store()
4026 mdname(mddev)); in level_store()
4027 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); in level_store()
4028 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); in level_store()
4029 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); in level_store()
4034 if (mddev->to_remove == NULL) in level_store()
4035 mddev->to_remove = &md_redundancy_group; in level_store()
4040 rdev_for_each(rdev, mddev) { in level_store()
4043 if (rdev->new_raid_disk >= mddev->raid_disks) in level_store()
4047 sysfs_unlink_rdev(mddev, rdev); in level_store()
4049 rdev_for_each(rdev, mddev) { in level_store()
4058 if (sysfs_link_rdev(mddev, rdev)) in level_store()
4060 rdev->raid_disk, mdname(mddev)); in level_store()
4068 mddev->in_sync = 1; in level_store()
4069 del_timer_sync(&mddev->safemode_timer); in level_store()
4071 blk_set_stacking_limits(&mddev->queue->limits); in level_store()
4072 pers->run(mddev); in level_store()
4073 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in level_store()
4074 mddev_resume(mddev); in level_store()
4075 if (!mddev->thread) in level_store()
4076 md_update_sb(mddev, 1); in level_store()
4077 sysfs_notify_dirent_safe(mddev->sysfs_level); in level_store()
4081 mddev_unlock(mddev); in level_store()
4089 layout_show(struct mddev *mddev, char *page) in layout_show() argument
4092 if (mddev->reshape_position != MaxSector && in layout_show()
4093 mddev->layout != mddev->new_layout) in layout_show()
4095 mddev->new_layout, mddev->layout); in layout_show()
4096 return sprintf(page, "%d\n", mddev->layout); in layout_show()
4100 layout_store(struct mddev *mddev, const char *buf, size_t len) in layout_store() argument
4108 err = mddev_lock(mddev); in layout_store()
4112 if (mddev->pers) { in layout_store()
4113 if (mddev->pers->check_reshape == NULL) in layout_store()
4115 else if (mddev->ro) in layout_store()
4118 mddev->new_layout = n; in layout_store()
4119 err = mddev->pers->check_reshape(mddev); in layout_store()
4121 mddev->new_layout = mddev->layout; in layout_store()
4124 mddev->new_layout = n; in layout_store()
4125 if (mddev->reshape_position == MaxSector) in layout_store()
4126 mddev->layout = n; in layout_store()
4128 mddev_unlock(mddev); in layout_store()
4135 raid_disks_show(struct mddev *mddev, char *page) in raid_disks_show() argument
4137 if (mddev->raid_disks == 0) in raid_disks_show()
4139 if (mddev->reshape_position != MaxSector && in raid_disks_show()
4140 mddev->delta_disks != 0) in raid_disks_show()
4141 return sprintf(page, "%d (%d)\n", mddev->raid_disks, in raid_disks_show()
4142 mddev->raid_disks - mddev->delta_disks); in raid_disks_show()
4143 return sprintf(page, "%d\n", mddev->raid_disks); in raid_disks_show()
4146 static int update_raid_disks(struct mddev *mddev, int raid_disks);
4149 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) in raid_disks_store() argument
4158 err = mddev_lock(mddev); in raid_disks_store()
4161 if (mddev->pers) in raid_disks_store()
4162 err = update_raid_disks(mddev, n); in raid_disks_store()
4163 else if (mddev->reshape_position != MaxSector) { in raid_disks_store()
4165 int olddisks = mddev->raid_disks - mddev->delta_disks; in raid_disks_store()
4168 rdev_for_each(rdev, mddev) { in raid_disks_store()
4177 mddev->delta_disks = n - olddisks; in raid_disks_store()
4178 mddev->raid_disks = n; in raid_disks_store()
4179 mddev->reshape_backwards = (mddev->delta_disks < 0); in raid_disks_store()
4181 mddev->raid_disks = n; in raid_disks_store()
4183 mddev_unlock(mddev); in raid_disks_store()
4190 uuid_show(struct mddev *mddev, char *page) in uuid_show() argument
4192 return sprintf(page, "%pU\n", mddev->uuid); in uuid_show()
4198 chunk_size_show(struct mddev *mddev, char *page) in chunk_size_show() argument
4200 if (mddev->reshape_position != MaxSector && in chunk_size_show()
4201 mddev->chunk_sectors != mddev->new_chunk_sectors) in chunk_size_show()
4203 mddev->new_chunk_sectors << 9, in chunk_size_show()
4204 mddev->chunk_sectors << 9); in chunk_size_show()
4205 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); in chunk_size_show()
4209 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) in chunk_size_store() argument
4218 err = mddev_lock(mddev); in chunk_size_store()
4221 if (mddev->pers) { in chunk_size_store()
4222 if (mddev->pers->check_reshape == NULL) in chunk_size_store()
4224 else if (mddev->ro) in chunk_size_store()
4227 mddev->new_chunk_sectors = n >> 9; in chunk_size_store()
4228 err = mddev->pers->check_reshape(mddev); in chunk_size_store()
4230 mddev->new_chunk_sectors = mddev->chunk_sectors; in chunk_size_store()
4233 mddev->new_chunk_sectors = n >> 9; in chunk_size_store()
4234 if (mddev->reshape_position == MaxSector) in chunk_size_store()
4235 mddev->chunk_sectors = n >> 9; in chunk_size_store()
4237 mddev_unlock(mddev); in chunk_size_store()
4244 resync_start_show(struct mddev *mddev, char *page) in resync_start_show() argument
4246 if (mddev->recovery_cp == MaxSector) in resync_start_show()
4248 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); in resync_start_show()
4252 resync_start_store(struct mddev *mddev, const char *buf, size_t len) in resync_start_store() argument
4267 err = mddev_lock(mddev); in resync_start_store()
4270 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) in resync_start_store()
4274 mddev->recovery_cp = n; in resync_start_store()
4275 if (mddev->pers) in resync_start_store()
4276 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in resync_start_store()
4278 mddev_unlock(mddev); in resync_start_store()
4341 array_state_show(struct mddev *mddev, char *page) in array_state_show() argument
4345 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) { in array_state_show()
4346 switch(mddev->ro) { in array_state_show()
4354 spin_lock(&mddev->lock); in array_state_show()
4355 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) in array_state_show()
4357 else if (mddev->in_sync) in array_state_show()
4359 else if (mddev->safemode) in array_state_show()
4363 spin_unlock(&mddev->lock); in array_state_show()
4366 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean) in array_state_show()
4369 if (list_empty(&mddev->disks) && in array_state_show()
4370 mddev->raid_disks == 0 && in array_state_show()
4371 mddev->dev_sectors == 0) in array_state_show()
4379 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4380 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
4381 static int restart_array(struct mddev *mddev);
4384 array_state_store(struct mddev *mddev, const char *buf, size_t len) in array_state_store() argument
4389 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) { in array_state_store()
4393 spin_lock(&mddev->lock); in array_state_store()
4395 restart_array(mddev); in array_state_store()
4396 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in array_state_store()
4397 md_wakeup_thread(mddev->thread); in array_state_store()
4398 wake_up(&mddev->sb_wait); in array_state_store()
4400 restart_array(mddev); in array_state_store()
4401 if (!set_in_sync(mddev)) in array_state_store()
4405 sysfs_notify_dirent_safe(mddev->sysfs_state); in array_state_store()
4406 spin_unlock(&mddev->lock); in array_state_store()
4409 err = mddev_lock(mddev); in array_state_store()
4418 err = do_md_stop(mddev, 0, NULL); in array_state_store()
4422 if (mddev->pers) in array_state_store()
4423 err = do_md_stop(mddev, 2, NULL); in array_state_store()
4430 if (mddev->pers) in array_state_store()
4431 err = md_set_readonly(mddev, NULL); in array_state_store()
4433 mddev->ro = 1; in array_state_store()
4434 set_disk_ro(mddev->gendisk, 1); in array_state_store()
4435 err = do_md_run(mddev); in array_state_store()
4439 if (mddev->pers) { in array_state_store()
4440 if (mddev->ro == 0) in array_state_store()
4441 err = md_set_readonly(mddev, NULL); in array_state_store()
4442 else if (mddev->ro == 1) in array_state_store()
4443 err = restart_array(mddev); in array_state_store()
4445 mddev->ro = 2; in array_state_store()
4446 set_disk_ro(mddev->gendisk, 0); in array_state_store()
4449 mddev->ro = 2; in array_state_store()
4450 err = do_md_run(mddev); in array_state_store()
4454 if (mddev->pers) { in array_state_store()
4455 err = restart_array(mddev); in array_state_store()
4458 spin_lock(&mddev->lock); in array_state_store()
4459 if (!set_in_sync(mddev)) in array_state_store()
4461 spin_unlock(&mddev->lock); in array_state_store()
4466 if (mddev->pers) { in array_state_store()
4467 err = restart_array(mddev); in array_state_store()
4470 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in array_state_store()
4471 wake_up(&mddev->sb_wait); in array_state_store()
4474 mddev->ro = 0; in array_state_store()
4475 set_disk_ro(mddev->gendisk, 0); in array_state_store()
4476 err = do_md_run(mddev); in array_state_store()
4487 if (mddev->hold_active == UNTIL_IOCTL) in array_state_store()
4488 mddev->hold_active = 0; in array_state_store()
4489 sysfs_notify_dirent_safe(mddev->sysfs_state); in array_state_store()
4491 mddev_unlock(mddev); in array_state_store()
4498 max_corrected_read_errors_show(struct mddev *mddev, char *page) { in max_corrected_read_errors_show() argument
4500 atomic_read(&mddev->max_corr_read_errors)); in max_corrected_read_errors_show()
4504 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) in max_corrected_read_errors_store() argument
4512 atomic_set(&mddev->max_corr_read_errors, n); in max_corrected_read_errors_store()
4521 null_show(struct mddev *mddev, char *page) in null_show() argument
4527 static void flush_rdev_wq(struct mddev *mddev) in flush_rdev_wq() argument
4532 rdev_for_each_rcu(rdev, mddev) in flush_rdev_wq()
4541 new_dev_store(struct mddev *mddev, const char *buf, size_t len) in new_dev_store() argument
4567 flush_rdev_wq(mddev); in new_dev_store()
4568 err = mddev_lock(mddev); in new_dev_store()
4571 if (mddev->persistent) { in new_dev_store()
4572 rdev = md_import_device(dev, mddev->major_version, in new_dev_store()
4573 mddev->minor_version); in new_dev_store()
4574 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { in new_dev_store()
4576 = list_entry(mddev->disks.next, in new_dev_store()
4578 err = super_types[mddev->major_version] in new_dev_store()
4579 .load_super(rdev, rdev0, mddev->minor_version); in new_dev_store()
4583 } else if (mddev->external) in new_dev_store()
4589 mddev_unlock(mddev); in new_dev_store()
4592 err = bind_rdev_to_array(rdev, mddev); in new_dev_store()
4596 mddev_unlock(mddev); in new_dev_store()
4606 bitmap_store(struct mddev *mddev, const char *buf, size_t len) in bitmap_store() argument
4612 err = mddev_lock(mddev); in bitmap_store()
4615 if (!mddev->bitmap) in bitmap_store()
4627 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); in bitmap_store()
4630 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ in bitmap_store()
4632 mddev_unlock(mddev); in bitmap_store()
4640 size_show(struct mddev *mddev, char *page) in size_show() argument
4643 (unsigned long long)mddev->dev_sectors / 2); in size_show()
4646 static int update_size(struct mddev *mddev, sector_t num_sectors);
4649 size_store(struct mddev *mddev, const char *buf, size_t len) in size_store() argument
4660 err = mddev_lock(mddev); in size_store()
4663 if (mddev->pers) { in size_store()
4664 err = update_size(mddev, sectors); in size_store()
4666 md_update_sb(mddev, 1); in size_store()
4668 if (mddev->dev_sectors == 0 || in size_store()
4669 mddev->dev_sectors > sectors) in size_store()
4670 mddev->dev_sectors = sectors; in size_store()
4674 mddev_unlock(mddev); in size_store()
4688 metadata_show(struct mddev *mddev, char *page) in metadata_show() argument
4690 if (mddev->persistent) in metadata_show()
4692 mddev->major_version, mddev->minor_version); in metadata_show()
4693 else if (mddev->external) in metadata_show()
4694 return sprintf(page, "external:%s\n", mddev->metadata_type); in metadata_show()
4700 metadata_store(struct mddev *mddev, const char *buf, size_t len) in metadata_store() argument
4710 err = mddev_lock(mddev); in metadata_store()
4714 if (mddev->external && strncmp(buf, "external:", 9) == 0) in metadata_store()
4716 else if (!list_empty(&mddev->disks)) in metadata_store()
4721 mddev->persistent = 0; in metadata_store()
4722 mddev->external = 0; in metadata_store()
4723 mddev->major_version = 0; in metadata_store()
4724 mddev->minor_version = 90; in metadata_store()
4729 if (namelen >= sizeof(mddev->metadata_type)) in metadata_store()
4730 namelen = sizeof(mddev->metadata_type)-1; in metadata_store()
4731 strncpy(mddev->metadata_type, buf+9, namelen); in metadata_store()
4732 mddev->metadata_type[namelen] = 0; in metadata_store()
4733 if (namelen && mddev->metadata_type[namelen-1] == '\n') in metadata_store()
4734 mddev->metadata_type[--namelen] = 0; in metadata_store()
4735 mddev->persistent = 0; in metadata_store()
4736 mddev->external = 1; in metadata_store()
4737 mddev->major_version = 0; in metadata_store()
4738 mddev->minor_version = 90; in metadata_store()
4752 mddev->major_version = major; in metadata_store()
4753 mddev->minor_version = minor; in metadata_store()
4754 mddev->persistent = 1; in metadata_store()
4755 mddev->external = 0; in metadata_store()
4758 mddev_unlock(mddev); in metadata_store()
4766 action_show(struct mddev *mddev, char *page) in action_show() argument
4769 unsigned long recovery = mddev->recovery; in action_show()
4773 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) { in action_show()
4785 else if (mddev->reshape_position != MaxSector) in action_show()
4792 action_store(struct mddev *mddev, const char *page, size_t len) in action_store() argument
4794 if (!mddev->pers || !mddev->pers->sync_request) in action_store()
4800 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4802 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4803 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in action_store()
4804 mddev_lock(mddev) == 0) { in action_store()
4805 if (work_pending(&mddev->del_work)) in action_store()
4807 if (mddev->sync_thread) { in action_store()
4808 sector_t save_rp = mddev->reshape_position; in action_store()
4810 mddev_unlock(mddev); in action_store()
4811 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in action_store()
4812 md_unregister_thread(&mddev->sync_thread); in action_store()
4813 mddev_lock_nointr(mddev); in action_store()
4820 mddev->reshape_position = save_rp; in action_store()
4821 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in action_store()
4822 md_reap_sync_thread(mddev); in action_store()
4824 mddev_unlock(mddev); in action_store()
4826 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in action_store()
4829 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4831 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4832 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in action_store()
4835 if (mddev->pers->start_reshape == NULL) in action_store()
4837 err = mddev_lock(mddev); in action_store()
4839 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in action_store()
4842 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4843 err = mddev->pers->start_reshape(mddev); in action_store()
4845 mddev_unlock(mddev); in action_store()
4849 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in action_store()
4852 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); in action_store()
4855 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4856 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in action_store()
4857 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in action_store()
4859 if (mddev->ro == 2) { in action_store()
4863 mddev->ro = 0; in action_store()
4864 md_wakeup_thread(mddev->sync_thread); in action_store()
4866 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in action_store()
4867 md_wakeup_thread(mddev->thread); in action_store()
4868 sysfs_notify_dirent_safe(mddev->sysfs_action); in action_store()
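action_show()/action_store() implement the "sync_action" attribute, which accepts idle, frozen, resync, recover, check, repair and reshape. A minimal hedged sketch that requests a scrub and reads the state back; the array name md0 is an assumption for the example:

    /* Hedged example: start a "check" scrub on md0 via sysfs and read back
     * the currently reported sync_action.  Error handling is minimal. */
    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/block/md0/md/sync_action";
        char buf[64];
        FILE *f = fopen(path, "w");

        if (!f) { perror(path); return 1; }
        fputs("check\n", f);
        fclose(f);

        f = fopen(path, "r");
        if (!f) { perror(path); return 1; }
        if (fgets(buf, sizeof(buf), f))
            printf("sync_action: %s", buf);
        fclose(f);
        return 0;
    }
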
4876 last_sync_action_show(struct mddev *mddev, char *page) in last_sync_action_show() argument
4878 return sprintf(page, "%s\n", mddev->last_sync_action); in last_sync_action_show()
4884 mismatch_cnt_show(struct mddev *mddev, char *page) in mismatch_cnt_show() argument
4888 atomic64_read(&mddev->resync_mismatches)); in mismatch_cnt_show()
4894 sync_min_show(struct mddev *mddev, char *page) in sync_min_show() argument
4896 return sprintf(page, "%d (%s)\n", speed_min(mddev), in sync_min_show()
4897 mddev->sync_speed_min ? "local": "system"); in sync_min_show()
4901 sync_min_store(struct mddev *mddev, const char *buf, size_t len) in sync_min_store() argument
4915 mddev->sync_speed_min = min; in sync_min_store()
4923 sync_max_show(struct mddev *mddev, char *page) in sync_max_show() argument
4925 return sprintf(page, "%d (%s)\n", speed_max(mddev), in sync_max_show()
4926 mddev->sync_speed_max ? "local": "system"); in sync_max_show()
4930 sync_max_store(struct mddev *mddev, const char *buf, size_t len) in sync_max_store() argument
4944 mddev->sync_speed_max = max; in sync_max_store()
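sync_min_store()/sync_max_store() set per-array resync throttles; the matching show handlers report the value followed by "(local)" or "(system)" depending on whether the array overrides the global sysctl limit, and writing the string "system" drops back to that global default. A hedged sketch, where md0 and 50000 KiB/s are example values:

    /* Hedged example: cap md0's resync speed, show the result, then revert
     * to the system-wide limit. */
    #include <stdio.h>

    static int put(const char *path, const char *val)
    {
        FILE *f = fopen(path, "w");

        if (!f)
            return -1;
        fputs(val, f);
        return fclose(f);
    }

    int main(void)
    {
        const char *p = "/sys/block/md0/md/sync_speed_max";
        char line[64];
        FILE *f;

        put(p, "50000\n");                 /* per-array ceiling, KiB/s */

        f = fopen(p, "r");
        if (f && fgets(line, sizeof(line), f))
            printf("%s", line);            /* e.g. "50000 (local)" */
        if (f)
            fclose(f);

        put(p, "system\n");                /* back to the sysctl default */
        return 0;
    }
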
4952 degraded_show(struct mddev *mddev, char *page) in degraded_show() argument
4954 return sprintf(page, "%d\n", mddev->degraded); in degraded_show()
4959 sync_force_parallel_show(struct mddev *mddev, char *page) in sync_force_parallel_show() argument
4961 return sprintf(page, "%d\n", mddev->parallel_resync); in sync_force_parallel_show()
4965 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) in sync_force_parallel_store() argument
4975 mddev->parallel_resync = n; in sync_force_parallel_store()
4977 if (mddev->sync_thread) in sync_force_parallel_store()
4989 sync_speed_show(struct mddev *mddev, char *page) in sync_speed_show() argument
4992 if (mddev->curr_resync == MD_RESYNC_NONE) in sync_speed_show()
4994 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); in sync_speed_show()
4995 dt = (jiffies - mddev->resync_mark) / HZ; in sync_speed_show()
4997 db = resync - mddev->resync_mark_cnt; in sync_speed_show()
5004 sync_completed_show(struct mddev *mddev, char *page) in sync_completed_show() argument
5008 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in sync_completed_show()
5011 if (mddev->curr_resync == MD_RESYNC_YIELDED || in sync_completed_show()
5012 mddev->curr_resync == MD_RESYNC_DELAYED) in sync_completed_show()
5015 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in sync_completed_show()
5016 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in sync_completed_show()
5017 max_sectors = mddev->resync_max_sectors; in sync_completed_show()
5019 max_sectors = mddev->dev_sectors; in sync_completed_show()
5021 resync = mddev->curr_resync_completed; in sync_completed_show()
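sync_completed_show() reports either "none", "delayed", or "<done> / <total>" in sectors while a sync is running. A hedged progress reader, again assuming an array named md0:

    /* Hedged example: turn md0's sync_completed into a percentage. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long long done, total;
        char word[32];
        FILE *f = fopen("/sys/block/md0/md/sync_completed", "r");

        if (!f) { perror("sync_completed"); return 1; }
        if (fscanf(f, "%llu / %llu", &done, &total) == 2 && total)
            printf("sync %.1f%% complete\n", 100.0 * done / total);
        else {
            rewind(f);
            if (fscanf(f, "%31s", word) == 1)
                printf("sync state: %s\n", word);   /* "none" or "delayed" */
        }
        fclose(f);
        return 0;
    }
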
5029 min_sync_show(struct mddev *mddev, char *page) in min_sync_show() argument
5032 (unsigned long long)mddev->resync_min); in min_sync_show()
5035 min_sync_store(struct mddev *mddev, const char *buf, size_t len) in min_sync_store() argument
5043 spin_lock(&mddev->lock); in min_sync_store()
5045 if (min > mddev->resync_max) in min_sync_store()
5049 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in min_sync_store()
5053 mddev->resync_min = round_down(min, 8); in min_sync_store()
5057 spin_unlock(&mddev->lock); in min_sync_store()
5065 max_sync_show(struct mddev *mddev, char *page) in max_sync_show() argument
5067 if (mddev->resync_max == MaxSector) in max_sync_show()
5071 (unsigned long long)mddev->resync_max); in max_sync_show()
5074 max_sync_store(struct mddev *mddev, const char *buf, size_t len) in max_sync_store() argument
5077 spin_lock(&mddev->lock); in max_sync_store()
5079 mddev->resync_max = MaxSector; in max_sync_store()
5087 if (max < mddev->resync_min) in max_sync_store()
5091 if (max < mddev->resync_max && in max_sync_store()
5092 mddev->ro == 0 && in max_sync_store()
5093 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in max_sync_store()
5097 chunk = mddev->chunk_sectors; in max_sync_store()
5105 mddev->resync_max = max; in max_sync_store()
5107 wake_up(&mddev->recovery_wait); in max_sync_store()
5110 spin_unlock(&mddev->lock); in max_sync_store()
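min_sync_store()/max_sync_store() bound the region a requested check or repair will cover: "sync_min" and "sync_max" take sector offsets (sync_max also accepts the literal "max", and values are validated against the chunk size). A hedged partial-scrub sketch; md0 and the 1 GiB window are example values and must respect the array's chunk alignment:

    /* Illustrative partial scrub: check only sectors [0, 2097152) of md0,
     * then lift the limit again.  Minimal error handling. */
    #include <stdio.h>

    static int put(const char *file, const char *val)
    {
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/block/md0/md/%s", file);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fputs(val, f);
        return fclose(f);
    }

    int main(void)
    {
        put("sync_min", "0\n");
        put("sync_max", "2097152\n");   /* 1 GiB in 512-byte sectors */
        put("sync_action", "check\n");
        /* ... wait for the check to reach sync_max, then remove the cap ... */
        put("sync_max", "max\n");
        return 0;
    }
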
5118 suspend_lo_show(struct mddev *mddev, char *page) in suspend_lo_show() argument
5120 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); in suspend_lo_show()
5124 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) in suspend_lo_store() argument
5135 err = mddev_lock(mddev); in suspend_lo_store()
5139 if (mddev->pers == NULL || in suspend_lo_store()
5140 mddev->pers->quiesce == NULL) in suspend_lo_store()
5142 mddev_suspend(mddev); in suspend_lo_store()
5143 mddev->suspend_lo = new; in suspend_lo_store()
5144 mddev_resume(mddev); in suspend_lo_store()
5148 mddev_unlock(mddev); in suspend_lo_store()
5155 suspend_hi_show(struct mddev *mddev, char *page) in suspend_hi_show() argument
5157 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); in suspend_hi_show()
5161 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) in suspend_hi_store() argument
5172 err = mddev_lock(mddev); in suspend_hi_store()
5176 if (mddev->pers == NULL) in suspend_hi_store()
5179 mddev_suspend(mddev); in suspend_hi_store()
5180 mddev->suspend_hi = new; in suspend_hi_store()
5181 mddev_resume(mddev); in suspend_hi_store()
5185 mddev_unlock(mddev); in suspend_hi_store()
5192 reshape_position_show(struct mddev *mddev, char *page) in reshape_position_show() argument
5194 if (mddev->reshape_position != MaxSector) in reshape_position_show()
5196 (unsigned long long)mddev->reshape_position); in reshape_position_show()
5202 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) in reshape_position_store() argument
5213 err = mddev_lock(mddev); in reshape_position_store()
5217 if (mddev->pers) in reshape_position_store()
5219 mddev->reshape_position = new; in reshape_position_store()
5220 mddev->delta_disks = 0; in reshape_position_store()
5221 mddev->reshape_backwards = 0; in reshape_position_store()
5222 mddev->new_level = mddev->level; in reshape_position_store()
5223 mddev->new_layout = mddev->layout; in reshape_position_store()
5224 mddev->new_chunk_sectors = mddev->chunk_sectors; in reshape_position_store()
5225 rdev_for_each(rdev, mddev) in reshape_position_store()
5229 mddev_unlock(mddev); in reshape_position_store()
5238 reshape_direction_show(struct mddev *mddev, char *page) in reshape_direction_show() argument
5241 mddev->reshape_backwards ? "backwards" : "forwards"); in reshape_direction_show()
5245 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) in reshape_direction_store() argument
5256 if (mddev->reshape_backwards == backwards) in reshape_direction_store()
5259 err = mddev_lock(mddev); in reshape_direction_store()
5263 if (mddev->delta_disks) in reshape_direction_store()
5265 else if (mddev->persistent && in reshape_direction_store()
5266 mddev->major_version == 0) in reshape_direction_store()
5269 mddev->reshape_backwards = backwards; in reshape_direction_store()
5270 mddev_unlock(mddev); in reshape_direction_store()
5279 array_size_show(struct mddev *mddev, char *page) in array_size_show() argument
5281 if (mddev->external_size) in array_size_show()
5283 (unsigned long long)mddev->array_sectors/2); in array_size_show()
5289 array_size_store(struct mddev *mddev, const char *buf, size_t len) in array_size_store() argument
5294 err = mddev_lock(mddev); in array_size_store()
5299 if (mddev_is_clustered(mddev)) { in array_size_store()
5300 mddev_unlock(mddev); in array_size_store()
5305 if (mddev->pers) in array_size_store()
5306 sectors = mddev->pers->size(mddev, 0, 0); in array_size_store()
5308 sectors = mddev->array_sectors; in array_size_store()
5310 mddev->external_size = 0; in array_size_store()
5314 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) in array_size_store()
5317 mddev->external_size = 1; in array_size_store()
5321 mddev->array_sectors = sectors; in array_size_store()
5322 if (mddev->pers) in array_size_store()
5323 set_capacity_and_notify(mddev->gendisk, in array_size_store()
5324 mddev->array_sectors); in array_size_store()
5326 mddev_unlock(mddev); in array_size_store()
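array_size_store() lets user space clamp the size the array exports: writing a value (interpreted in KiB, and not allowed to exceed what the personality computes) records an external size, while writing "default" reverts to the personality-computed size, which is also what the show side reports when no clamp is set. A hedged sketch with an assumed array name and size:

    /* Illustrative only: clamp md0's visible capacity, then restore the
     * default.  The value is in KiB and must not exceed the real size. */
    #include <stdio.h>

    static int put(const char *val)
    {
        FILE *f = fopen("/sys/block/md0/md/array_size", "w");

        if (!f)
            return -1;
        fputs(val, f);
        return fclose(f);
    }

    int main(void)
    {
        put("1048576\n");      /* example clamp: 1 GiB expressed in KiB */
        put("default\n");      /* back to the personality-computed size */
        return 0;
    }
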
5335 consistency_policy_show(struct mddev *mddev, char *page) in consistency_policy_show() argument
5339 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { in consistency_policy_show()
5341 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) { in consistency_policy_show()
5343 } else if (mddev->bitmap) { in consistency_policy_show()
5345 } else if (mddev->pers) { in consistency_policy_show()
5346 if (mddev->pers->sync_request) in consistency_policy_show()
5358 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len) in consistency_policy_store() argument
5362 if (mddev->pers) { in consistency_policy_store()
5363 if (mddev->pers->change_consistency_policy) in consistency_policy_store()
5364 err = mddev->pers->change_consistency_policy(mddev, buf); in consistency_policy_store()
5367 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) { in consistency_policy_store()
5368 set_bit(MD_HAS_PPL, &mddev->flags); in consistency_policy_store()
5380 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page) in fail_last_dev_show() argument
5382 return sprintf(page, "%d\n", mddev->fail_last_dev); in fail_last_dev_show()
5390 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len) in fail_last_dev_store() argument
5399 if (value != mddev->fail_last_dev) in fail_last_dev_store()
5400 mddev->fail_last_dev = value; in fail_last_dev_store()
5408 static ssize_t serialize_policy_show(struct mddev *mddev, char *page) in serialize_policy_show() argument
5410 if (mddev->pers == NULL || (mddev->pers->level != 1)) in serialize_policy_show()
5413 return sprintf(page, "%d\n", mddev->serialize_policy); in serialize_policy_show()
5421 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len) in serialize_policy_store() argument
5430 if (value == mddev->serialize_policy) in serialize_policy_store()
5433 err = mddev_lock(mddev); in serialize_policy_store()
5436 if (mddev->pers == NULL || (mddev->pers->level != 1)) { in serialize_policy_store()
5442 mddev_suspend(mddev); in serialize_policy_store()
5444 mddev_create_serial_pool(mddev, NULL, true); in serialize_policy_store()
5446 mddev_destroy_serial_pool(mddev, NULL, true); in serialize_policy_store()
5447 mddev->serialize_policy = value; in serialize_policy_store()
5448 mddev_resume(mddev); in serialize_policy_store()
5450 mddev_unlock(mddev); in serialize_policy_store()
5517 struct mddev *mddev = container_of(kobj, struct mddev, kobj); in md_attr_show() local
5523 if (!mddev_get(mddev)) { in md_attr_show()
5529 rv = entry->show(mddev, page); in md_attr_show()
5530 mddev_put(mddev); in md_attr_show()
5539 struct mddev *mddev = container_of(kobj, struct mddev, kobj); in md_attr_store() local
5547 if (!mddev_get(mddev)) { in md_attr_store()
5552 rv = entry->store(mddev, page, length); in md_attr_store()
5553 mddev_put(mddev); in md_attr_store()
5559 struct mddev *mddev = container_of(ko, struct mddev, kobj); in md_kobj_release() local
5561 if (mddev->sysfs_state) in md_kobj_release()
5562 sysfs_put(mddev->sysfs_state); in md_kobj_release()
5563 if (mddev->sysfs_level) in md_kobj_release()
5564 sysfs_put(mddev->sysfs_level); in md_kobj_release()
5566 del_gendisk(mddev->gendisk); in md_kobj_release()
5567 put_disk(mddev->gendisk); in md_kobj_release()
5584 struct mddev *mddev = container_of(ws, struct mddev, del_work); in mddev_delayed_delete() local
5586 kobject_put(&mddev->kobj); in mddev_delayed_delete()
5591 int mddev_init_writes_pending(struct mddev *mddev) in mddev_init_writes_pending() argument
5593 if (mddev->writes_pending.percpu_count_ptr) in mddev_init_writes_pending()
5595 if (percpu_ref_init(&mddev->writes_pending, no_op, in mddev_init_writes_pending()
5599 percpu_ref_put(&mddev->writes_pending); in mddev_init_writes_pending()
5604 struct mddev *md_alloc(dev_t dev, char *name) in md_alloc()
5616 struct mddev *mddev; in md_alloc() local
5631 mddev = mddev_alloc(dev); in md_alloc()
5632 if (IS_ERR(mddev)) { in md_alloc()
5633 error = PTR_ERR(mddev); in md_alloc()
5637 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); in md_alloc()
5639 unit = MINOR(mddev->unit) >> shift; in md_alloc()
5644 struct mddev *mddev2; in md_alloc()
5660 mddev->hold_active = UNTIL_STOP; in md_alloc()
5667 disk->major = MAJOR(mddev->unit); in md_alloc()
5677 disk->private_data = mddev; in md_alloc()
5679 mddev->queue = disk->queue; in md_alloc()
5680 blk_set_stacking_limits(&mddev->queue->limits); in md_alloc()
5681 blk_queue_write_cache(mddev->queue, true, true); in md_alloc()
5683 mddev->gendisk = disk; in md_alloc()
5688 kobject_init(&mddev->kobj, &md_ktype); in md_alloc()
5689 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); in md_alloc()
5696 mddev->hold_active = 0; in md_alloc()
5698 mddev_put(mddev); in md_alloc()
5702 kobject_uevent(&mddev->kobj, KOBJ_ADD); in md_alloc()
5703 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); in md_alloc()
5704 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level"); in md_alloc()
5706 return mddev; in md_alloc()
5711 mddev_free(mddev); in md_alloc()
5719 struct mddev *mddev = md_alloc(dev, name); in md_alloc_and_put() local
5721 if (IS_ERR(mddev)) in md_alloc_and_put()
5722 return PTR_ERR(mddev); in md_alloc_and_put()
5723 mddev_put(mddev); in md_alloc_and_put()
5766 struct mddev *mddev = from_timer(mddev, t, safemode_timer); in md_safemode_timeout() local
5768 mddev->safemode = 1; in md_safemode_timeout()
5769 if (mddev->external) in md_safemode_timeout()
5770 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_safemode_timeout()
5772 md_wakeup_thread(mddev->thread); in md_safemode_timeout()
5777 int md_run(struct mddev *mddev) in md_run() argument
5784 if (list_empty(&mddev->disks)) in md_run()
5788 if (mddev->pers) in md_run()
5791 if (mddev->sysfs_active) in md_run()
5797 if (!mddev->raid_disks) { in md_run()
5798 if (!mddev->persistent) in md_run()
5800 err = analyze_sbs(mddev); in md_run()
5805 if (mddev->level != LEVEL_NONE) in md_run()
5806 request_module("md-level-%d", mddev->level); in md_run()
5807 else if (mddev->clevel[0]) in md_run()
5808 request_module("md-%s", mddev->clevel); in md_run()
5815 mddev->has_superblocks = false; in md_run()
5816 rdev_for_each(rdev, mddev) { in md_run()
5821 if (mddev->ro != 1 && rdev_read_only(rdev)) { in md_run()
5822 mddev->ro = 1; in md_run()
5823 if (mddev->gendisk) in md_run()
5824 set_disk_ro(mddev->gendisk, 1); in md_run()
5828 mddev->has_superblocks = true; in md_run()
5837 if (mddev->dev_sectors && in md_run()
5838 rdev->data_offset + mddev->dev_sectors in md_run()
5841 mdname(mddev)); in md_run()
5848 mdname(mddev)); in md_run()
5856 if (!bioset_initialized(&mddev->bio_set)) { in md_run()
5857 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); in md_run()
5861 if (!bioset_initialized(&mddev->sync_set)) { in md_run()
5862 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); in md_run()
5868 pers = find_pers(mddev->level, mddev->clevel); in md_run()
5871 if (mddev->level != LEVEL_NONE) in md_run()
5873 mddev->level); in md_run()
5876 mddev->clevel); in md_run()
5881 if (mddev->level != pers->level) { in md_run()
5882 mddev->level = pers->level; in md_run()
5883 mddev->new_level = pers->level; in md_run()
5885 strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); in md_run()
5887 if (mddev->reshape_position != MaxSector && in md_run()
5902 rdev_for_each(rdev, mddev) in md_run()
5903 rdev_for_each(rdev2, mddev) { in md_run()
5908 mdname(mddev), in md_run()
5919 mddev->recovery = 0; in md_run()
5921 mddev->resync_max_sectors = mddev->dev_sectors; in md_run()
5923 mddev->ok_start_degraded = start_dirty_degraded; in md_run()
5925 if (start_readonly && mddev->ro == 0) in md_run()
5926 mddev->ro = 2; /* read-only, but switch on first write */ in md_run()
5928 err = pers->run(mddev); in md_run()
5931 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { in md_run()
5932 WARN_ONCE(!mddev->external_size, in md_run()
5936 (unsigned long long)mddev->array_sectors / 2, in md_run()
5937 (unsigned long long)pers->size(mddev, 0, 0) / 2); in md_run()
5941 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { in md_run()
5944 bitmap = md_bitmap_create(mddev, -1); in md_run()
5948 mdname(mddev), err); in md_run()
5950 mddev->bitmap = bitmap; in md_run()
5956 if (mddev->bitmap_info.max_write_behind > 0) { in md_run()
5959 rdev_for_each(rdev, mddev) { in md_run()
5964 if (create_pool && mddev->serial_info_pool == NULL) { in md_run()
5965 mddev->serial_info_pool = in md_run()
5968 if (!mddev->serial_info_pool) { in md_run()
5975 if (mddev->queue) { in md_run()
5978 rdev_for_each(rdev, mddev) { in md_run()
5984 if (mddev->degraded) in md_run()
5987 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); in md_run()
5989 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); in md_run()
5990 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue); in md_run()
5994 blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue); in md_run()
5997 if (mddev->kobj.sd && in md_run()
5998 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) in md_run()
6000 mdname(mddev)); in md_run()
6001 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); in md_run()
6002 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); in md_run()
6003 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); in md_run()
6004 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ in md_run()
6005 mddev->ro = 0; in md_run()
6007 atomic_set(&mddev->max_corr_read_errors, in md_run()
6009 mddev->safemode = 0; in md_run()
6010 if (mddev_is_clustered(mddev)) in md_run()
6011 mddev->safemode_delay = 0; in md_run()
6013 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; in md_run()
6014 mddev->in_sync = 1; in md_run()
6016 spin_lock(&mddev->lock); in md_run()
6017 mddev->pers = pers; in md_run()
6018 spin_unlock(&mddev->lock); in md_run()
6019 rdev_for_each(rdev, mddev) in md_run()
6021 sysfs_link_rdev(mddev, rdev); /* failure here is OK */ in md_run()
6023 if (mddev->degraded && !mddev->ro) in md_run()
6027 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_run()
6028 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_run()
6030 if (mddev->sb_flags) in md_run()
6031 md_update_sb(mddev, 0); in md_run()
6037 mddev_detach(mddev); in md_run()
6038 if (mddev->private) in md_run()
6039 pers->free(mddev, mddev->private); in md_run()
6040 mddev->private = NULL; in md_run()
6042 md_bitmap_destroy(mddev); in md_run()
6044 bioset_exit(&mddev->sync_set); in md_run()
6046 bioset_exit(&mddev->bio_set); in md_run()
6051 int do_md_run(struct mddev *mddev) in do_md_run() argument
6055 set_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6056 err = md_run(mddev); in do_md_run()
6059 err = md_bitmap_load(mddev); in do_md_run()
6061 md_bitmap_destroy(mddev); in do_md_run()
6065 if (mddev_is_clustered(mddev)) in do_md_run()
6066 md_allow_write(mddev); in do_md_run()
6069 md_start(mddev); in do_md_run()
6071 md_wakeup_thread(mddev->thread); in do_md_run()
6072 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ in do_md_run()
6074 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); in do_md_run()
6075 clear_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6076 mddev->changed = 1; in do_md_run()
6077 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); in do_md_run()
6078 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_run()
6079 sysfs_notify_dirent_safe(mddev->sysfs_action); in do_md_run()
6080 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in do_md_run()
6082 clear_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6086 int md_start(struct mddev *mddev) in md_start() argument
6090 if (mddev->pers->start) { in md_start()
6091 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); in md_start()
6092 md_wakeup_thread(mddev->thread); in md_start()
6093 ret = mddev->pers->start(mddev); in md_start()
6094 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); in md_start()
6095 md_wakeup_thread(mddev->sync_thread); in md_start()
6101 static int restart_array(struct mddev *mddev) in restart_array() argument
6103 struct gendisk *disk = mddev->gendisk; in restart_array()
6109 if (list_empty(&mddev->disks)) in restart_array()
6111 if (!mddev->pers) in restart_array()
6113 if (!mddev->ro) in restart_array()
6117 rdev_for_each_rcu(rdev, mddev) { in restart_array()
6125 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal) in restart_array()
6131 mddev->safemode = 0; in restart_array()
6132 mddev->ro = 0; in restart_array()
6134 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); in restart_array()
6136 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in restart_array()
6137 md_wakeup_thread(mddev->thread); in restart_array()
6138 md_wakeup_thread(mddev->sync_thread); in restart_array()
6139 sysfs_notify_dirent_safe(mddev->sysfs_state); in restart_array()
6143 static void md_clean(struct mddev *mddev) in md_clean() argument
6145 mddev->array_sectors = 0; in md_clean()
6146 mddev->external_size = 0; in md_clean()
6147 mddev->dev_sectors = 0; in md_clean()
6148 mddev->raid_disks = 0; in md_clean()
6149 mddev->recovery_cp = 0; in md_clean()
6150 mddev->resync_min = 0; in md_clean()
6151 mddev->resync_max = MaxSector; in md_clean()
6152 mddev->reshape_position = MaxSector; in md_clean()
6153 mddev->external = 0; in md_clean()
6154 mddev->persistent = 0; in md_clean()
6155 mddev->level = LEVEL_NONE; in md_clean()
6156 mddev->clevel[0] = 0; in md_clean()
6157 mddev->flags = 0; in md_clean()
6158 mddev->sb_flags = 0; in md_clean()
6159 mddev->ro = 0; in md_clean()
6160 mddev->metadata_type[0] = 0; in md_clean()
6161 mddev->chunk_sectors = 0; in md_clean()
6162 mddev->ctime = mddev->utime = 0; in md_clean()
6163 mddev->layout = 0; in md_clean()
6164 mddev->max_disks = 0; in md_clean()
6165 mddev->events = 0; in md_clean()
6166 mddev->can_decrease_events = 0; in md_clean()
6167 mddev->delta_disks = 0; in md_clean()
6168 mddev->reshape_backwards = 0; in md_clean()
6169 mddev->new_level = LEVEL_NONE; in md_clean()
6170 mddev->new_layout = 0; in md_clean()
6171 mddev->new_chunk_sectors = 0; in md_clean()
6172 mddev->curr_resync = 0; in md_clean()
6173 atomic64_set(&mddev->resync_mismatches, 0); in md_clean()
6174 mddev->suspend_lo = mddev->suspend_hi = 0; in md_clean()
6175 mddev->sync_speed_min = mddev->sync_speed_max = 0; in md_clean()
6176 mddev->recovery = 0; in md_clean()
6177 mddev->in_sync = 0; in md_clean()
6178 mddev->changed = 0; in md_clean()
6179 mddev->degraded = 0; in md_clean()
6180 mddev->safemode = 0; in md_clean()
6181 mddev->private = NULL; in md_clean()
6182 mddev->cluster_info = NULL; in md_clean()
6183 mddev->bitmap_info.offset = 0; in md_clean()
6184 mddev->bitmap_info.default_offset = 0; in md_clean()
6185 mddev->bitmap_info.default_space = 0; in md_clean()
6186 mddev->bitmap_info.chunksize = 0; in md_clean()
6187 mddev->bitmap_info.daemon_sleep = 0; in md_clean()
6188 mddev->bitmap_info.max_write_behind = 0; in md_clean()
6189 mddev->bitmap_info.nodes = 0; in md_clean()
6192 static void __md_stop_writes(struct mddev *mddev) in __md_stop_writes() argument
6194 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in __md_stop_writes()
6195 if (work_pending(&mddev->del_work)) in __md_stop_writes()
6197 if (mddev->sync_thread) { in __md_stop_writes()
6198 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in __md_stop_writes()
6199 md_unregister_thread(&mddev->sync_thread); in __md_stop_writes()
6200 md_reap_sync_thread(mddev); in __md_stop_writes()
6203 del_timer_sync(&mddev->safemode_timer); in __md_stop_writes()
6205 if (mddev->pers && mddev->pers->quiesce) { in __md_stop_writes()
6206 mddev->pers->quiesce(mddev, 1); in __md_stop_writes()
6207 mddev->pers->quiesce(mddev, 0); in __md_stop_writes()
6209 md_bitmap_flush(mddev); in __md_stop_writes()
6211 if (mddev->ro == 0 && in __md_stop_writes()
6212 ((!mddev->in_sync && !mddev_is_clustered(mddev)) || in __md_stop_writes()
6213 mddev->sb_flags)) { in __md_stop_writes()
6215 if (!mddev_is_clustered(mddev)) in __md_stop_writes()
6216 mddev->in_sync = 1; in __md_stop_writes()
6217 md_update_sb(mddev, 1); in __md_stop_writes()
6220 mddev->serialize_policy = 0; in __md_stop_writes()
6221 mddev_destroy_serial_pool(mddev, NULL, true); in __md_stop_writes()
6224 void md_stop_writes(struct mddev *mddev) in md_stop_writes() argument
6226 mddev_lock_nointr(mddev); in md_stop_writes()
6227 __md_stop_writes(mddev); in md_stop_writes()
6228 mddev_unlock(mddev); in md_stop_writes()
6232 static void mddev_detach(struct mddev *mddev) in mddev_detach() argument
6234 md_bitmap_wait_behind_writes(mddev); in mddev_detach()
6235 if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) { in mddev_detach()
6236 mddev->pers->quiesce(mddev, 1); in mddev_detach()
6237 mddev->pers->quiesce(mddev, 0); in mddev_detach()
6239 md_unregister_thread(&mddev->thread); in mddev_detach()
6240 if (mddev->queue) in mddev_detach()
6241 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ in mddev_detach()
6244 static void __md_stop(struct mddev *mddev) in __md_stop() argument
6246 struct md_personality *pers = mddev->pers; in __md_stop()
6247 md_bitmap_destroy(mddev); in __md_stop()
6248 mddev_detach(mddev); in __md_stop()
6250 if (mddev->event_work.func) in __md_stop()
6252 spin_lock(&mddev->lock); in __md_stop()
6253 mddev->pers = NULL; in __md_stop()
6254 spin_unlock(&mddev->lock); in __md_stop()
6255 if (mddev->private) in __md_stop()
6256 pers->free(mddev, mddev->private); in __md_stop()
6257 mddev->private = NULL; in __md_stop()
6258 if (pers->sync_request && mddev->to_remove == NULL) in __md_stop()
6259 mddev->to_remove = &md_redundancy_group; in __md_stop()
6261 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in __md_stop()
6264 void md_stop(struct mddev *mddev) in md_stop() argument
6269 __md_stop_writes(mddev); in md_stop()
6270 __md_stop(mddev); in md_stop()
6271 bioset_exit(&mddev->bio_set); in md_stop()
6272 bioset_exit(&mddev->sync_set); in md_stop()
6277 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) in md_set_readonly() argument
6282 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { in md_set_readonly()
6284 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
6285 md_wakeup_thread(mddev->thread); in md_set_readonly()
6287 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in md_set_readonly()
6288 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_set_readonly()
6289 if (mddev->sync_thread) in md_set_readonly()
6292 wake_up_process(mddev->sync_thread->tsk); in md_set_readonly()
6294 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) in md_set_readonly()
6296 mddev_unlock(mddev); in md_set_readonly()
6298 &mddev->recovery)); in md_set_readonly()
6299 wait_event(mddev->sb_wait, in md_set_readonly()
6300 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_set_readonly()
6301 mddev_lock_nointr(mddev); in md_set_readonly()
6303 mutex_lock(&mddev->open_mutex); in md_set_readonly()
6304 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || in md_set_readonly()
6305 mddev->sync_thread || in md_set_readonly()
6306 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in md_set_readonly()
6307 pr_warn("md: %s still in use.\n",mdname(mddev)); in md_set_readonly()
6309 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
6310 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_set_readonly()
6311 md_wakeup_thread(mddev->thread); in md_set_readonly()
6316 if (mddev->pers) { in md_set_readonly()
6317 __md_stop_writes(mddev); in md_set_readonly()
6320 if (mddev->ro==1) in md_set_readonly()
6322 mddev->ro = 1; in md_set_readonly()
6323 set_disk_ro(mddev->gendisk, 1); in md_set_readonly()
6324 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
6325 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_set_readonly()
6326 md_wakeup_thread(mddev->thread); in md_set_readonly()
6327 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_set_readonly()
6331 mutex_unlock(&mddev->open_mutex); in md_set_readonly()
6339 static int do_md_stop(struct mddev *mddev, int mode, in do_md_stop() argument
6342 struct gendisk *disk = mddev->gendisk; in do_md_stop()
6346 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { in do_md_stop()
6348 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in do_md_stop()
6349 md_wakeup_thread(mddev->thread); in do_md_stop()
6351 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in do_md_stop()
6352 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in do_md_stop()
6353 if (mddev->sync_thread) in do_md_stop()
6356 wake_up_process(mddev->sync_thread->tsk); in do_md_stop()
6358 mddev_unlock(mddev); in do_md_stop()
6359 wait_event(resync_wait, (mddev->sync_thread == NULL && in do_md_stop()
6361 &mddev->recovery))); in do_md_stop()
6362 mddev_lock_nointr(mddev); in do_md_stop()
6364 mutex_lock(&mddev->open_mutex); in do_md_stop()
6365 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || in do_md_stop()
6366 mddev->sysfs_active || in do_md_stop()
6367 mddev->sync_thread || in do_md_stop()
6368 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in do_md_stop()
6369 pr_warn("md: %s still in use.\n",mdname(mddev)); in do_md_stop()
6370 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6372 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in do_md_stop()
6373 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in do_md_stop()
6374 md_wakeup_thread(mddev->thread); in do_md_stop()
6378 if (mddev->pers) { in do_md_stop()
6379 if (mddev->ro) in do_md_stop()
6382 __md_stop_writes(mddev); in do_md_stop()
6383 __md_stop(mddev); in do_md_stop()
6386 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_stop()
6388 rdev_for_each(rdev, mddev) in do_md_stop()
6390 sysfs_unlink_rdev(mddev, rdev); in do_md_stop()
6393 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6394 mddev->changed = 1; in do_md_stop()
6396 if (mddev->ro) in do_md_stop()
6397 mddev->ro = 0; in do_md_stop()
6399 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6404 pr_info("md: %s stopped.\n", mdname(mddev)); in do_md_stop()
6406 if (mddev->bitmap_info.file) { in do_md_stop()
6407 struct file *f = mddev->bitmap_info.file; in do_md_stop()
6408 spin_lock(&mddev->lock); in do_md_stop()
6409 mddev->bitmap_info.file = NULL; in do_md_stop()
6410 spin_unlock(&mddev->lock); in do_md_stop()
6413 mddev->bitmap_info.offset = 0; in do_md_stop()
6415 export_array(mddev); in do_md_stop()
6417 md_clean(mddev); in do_md_stop()
6418 if (mddev->hold_active == UNTIL_STOP) in do_md_stop()
6419 mddev->hold_active = 0; in do_md_stop()
6422 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_stop()
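do_md_stop() is the teardown path reached from the STOP_ARRAY ioctl (mode 0 dismantles the array; md_set_readonly() above serves STOP_ARRAY_RO). A hedged and deliberately destructive user-space counterpart, roughly what "mdadm --stop" ends up doing, against an assumed node /dev/md0:

    /* Illustrative only: stop the array behind /dev/md0.  This detaches all
     * member devices; do not aim it at an array holding data you need. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/major.h>
    #include <linux/raid/md_u.h>

    int main(void)
    {
        int fd = open("/dev/md0", O_RDONLY);

        if (fd < 0) { perror("/dev/md0"); return 1; }
        if (ioctl(fd, STOP_ARRAY, NULL) < 0)
            perror("STOP_ARRAY");
        close(fd);
        return 0;
    }
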
6427 static void autorun_array(struct mddev *mddev) in autorun_array() argument
6432 if (list_empty(&mddev->disks)) in autorun_array()
6437 rdev_for_each(rdev, mddev) { in autorun_array()
6442 err = do_md_run(mddev); in autorun_array()
6445 do_md_stop(mddev, 0, NULL); in autorun_array()
6464 struct mddev *mddev; in autorun_devices() local
6501 mddev = md_alloc(dev, NULL); in autorun_devices()
6502 if (IS_ERR(mddev)) in autorun_devices()
6505 if (mddev_lock(mddev)) in autorun_devices()
6506 pr_warn("md: %s locked, cannot run\n", mdname(mddev)); in autorun_devices()
6507 else if (mddev->raid_disks || mddev->major_version in autorun_devices()
6508 || !list_empty(&mddev->disks)) { in autorun_devices()
6510 mdname(mddev), rdev0->bdev); in autorun_devices()
6511 mddev_unlock(mddev); in autorun_devices()
6513 pr_debug("md: created %s\n", mdname(mddev)); in autorun_devices()
6514 mddev->persistent = 1; in autorun_devices()
6517 if (bind_rdev_to_array(rdev, mddev)) in autorun_devices()
6520 autorun_array(mddev); in autorun_devices()
6521 mddev_unlock(mddev); in autorun_devices()
6530 mddev_put(mddev); in autorun_devices()
6550 static int get_array_info(struct mddev *mddev, void __user *arg) in get_array_info() argument
6558 rdev_for_each_rcu(rdev, mddev) { in get_array_info()
6575 info.major_version = mddev->major_version; in get_array_info()
6576 info.minor_version = mddev->minor_version; in get_array_info()
6578 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); in get_array_info()
6579 info.level = mddev->level; in get_array_info()
6580 info.size = mddev->dev_sectors / 2; in get_array_info()
6581 if (info.size != mddev->dev_sectors / 2) /* overflow */ in get_array_info()
6584 info.raid_disks = mddev->raid_disks; in get_array_info()
6585 info.md_minor = mddev->md_minor; in get_array_info()
6586 info.not_persistent= !mddev->persistent; in get_array_info()
6588 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); in get_array_info()
6590 if (mddev->in_sync) in get_array_info()
6592 if (mddev->bitmap && mddev->bitmap_info.offset) in get_array_info()
6594 if (mddev_is_clustered(mddev)) in get_array_info()
6601 info.layout = mddev->layout; in get_array_info()
6602 info.chunk_size = mddev->chunk_sectors << 9; in get_array_info()
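get_array_info() services the GET_ARRAY_INFO ioctl by packing the mddev fields above into an mdu_array_info_t. A hedged user-space query against an assumed node /dev/md0:

    /* Hedged example: fetch and print the basic array description that
     * get_array_info() returns. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/major.h>
    #include <linux/raid/md_u.h>

    int main(void)
    {
        mdu_array_info_t info;
        int fd = open("/dev/md0", O_RDONLY);

        if (fd < 0) { perror("/dev/md0"); return 1; }
        if (ioctl(fd, GET_ARRAY_INFO, &info) < 0) {
            perror("GET_ARRAY_INFO");
            close(fd);
            return 1;
        }
        printf("level=%d raid_disks=%d active=%d working=%d failed=%d spare=%d\n",
               info.level, info.raid_disks, info.active_disks,
               info.working_disks, info.failed_disks, info.spare_disks);
        close(fd);
        return 0;
    }
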
6610 static int get_bitmap_file(struct mddev *mddev, void __user * arg) in get_bitmap_file() argument
6621 spin_lock(&mddev->lock); in get_bitmap_file()
6623 if (mddev->bitmap_info.file) { in get_bitmap_file()
6624 ptr = file_path(mddev->bitmap_info.file, file->pathname, in get_bitmap_file()
6632 spin_unlock(&mddev->lock); in get_bitmap_file()
6642 static int get_disk_info(struct mddev *mddev, void __user * arg) in get_disk_info() argument
6651 rdev = md_find_rdev_nr_rcu(mddev, info.number); in get_disk_info()
6682 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) in md_add_new_disk() argument
6687 if (mddev_is_clustered(mddev) && in md_add_new_disk()
6690 mdname(mddev)); in md_add_new_disk()
6697 if (!mddev->raid_disks) { in md_add_new_disk()
6700 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); in md_add_new_disk()
6706 if (!list_empty(&mddev->disks)) { in md_add_new_disk()
6708 = list_entry(mddev->disks.next, in md_add_new_disk()
6710 err = super_types[mddev->major_version] in md_add_new_disk()
6711 .load_super(rdev, rdev0, mddev->minor_version); in md_add_new_disk()
6720 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6731 if (mddev->pers) { in md_add_new_disk()
6733 if (!mddev->pers->hot_add_disk) { in md_add_new_disk()
6735 mdname(mddev)); in md_add_new_disk()
6738 if (mddev->persistent) in md_add_new_disk()
6739 rdev = md_import_device(dev, mddev->major_version, in md_add_new_disk()
6740 mddev->minor_version); in md_add_new_disk()
6749 if (!mddev->persistent) { in md_add_new_disk()
6751 info->raid_disk < mddev->raid_disks) { in md_add_new_disk()
6759 super_types[mddev->major_version]. in md_add_new_disk()
6760 validate_super(mddev, rdev); in md_add_new_disk()
6785 rdev_for_each(rdev2, mddev) { in md_add_new_disk()
6791 if (has_journal || mddev->bitmap) { in md_add_new_disk()
6800 if (mddev_is_clustered(mddev)) { in md_add_new_disk()
6805 err = md_cluster_ops->add_new_disk(mddev, rdev); in md_add_new_disk()
6814 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6819 if (mddev_is_clustered(mddev)) { in md_add_new_disk()
6822 err = md_cluster_ops->new_disk_ack(mddev, in md_add_new_disk()
6829 md_cluster_ops->add_new_disk_cancel(mddev); in md_add_new_disk()
6843 if (mddev->major_version != 0) { in md_add_new_disk()
6844 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); in md_add_new_disk()
6857 if (info->raid_disk < mddev->raid_disks) in md_add_new_disk()
6862 if (rdev->raid_disk < mddev->raid_disks) in md_add_new_disk()
6871 if (!mddev->persistent) { in md_add_new_disk()
6878 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6888 static int hot_remove_disk(struct mddev *mddev, dev_t dev) in hot_remove_disk() argument
6892 if (!mddev->pers) in hot_remove_disk()
6895 rdev = find_rdev(mddev, dev); in hot_remove_disk()
6903 remove_and_add_spares(mddev, rdev); in hot_remove_disk()
6909 if (mddev_is_clustered(mddev)) { in hot_remove_disk()
6910 if (md_cluster_ops->remove_disk(mddev, rdev)) in hot_remove_disk()
6915 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in hot_remove_disk()
6916 if (mddev->thread) in hot_remove_disk()
6917 md_wakeup_thread(mddev->thread); in hot_remove_disk()
6919 md_update_sb(mddev, 1); in hot_remove_disk()
6925 rdev->bdev, mdname(mddev)); in hot_remove_disk()
6929 static int hot_add_disk(struct mddev *mddev, dev_t dev) in hot_add_disk() argument
6934 if (!mddev->pers) in hot_add_disk()
6937 if (mddev->major_version != 0) { in hot_add_disk()
6939 mdname(mddev)); in hot_add_disk()
6942 if (!mddev->pers->hot_add_disk) { in hot_add_disk()
6944 mdname(mddev)); in hot_add_disk()
6955 if (mddev->persistent) in hot_add_disk()
6964 rdev->bdev, mdname(mddev)); in hot_add_disk()
6972 err = bind_rdev_to_array(rdev, mddev); in hot_add_disk()
6983 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in hot_add_disk()
6984 if (!mddev->thread) in hot_add_disk()
6985 md_update_sb(mddev, 1); in hot_add_disk()
6992 mdname(mddev), rdev->bdev); in hot_add_disk()
6993 blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue); in hot_add_disk()
6999 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in hot_add_disk()
7000 md_wakeup_thread(mddev->thread); in hot_add_disk()
7009 static int set_bitmap_file(struct mddev *mddev, int fd) in set_bitmap_file() argument
7013 if (mddev->pers) { in set_bitmap_file()
7014 if (!mddev->pers->quiesce || !mddev->thread) in set_bitmap_file()
7016 if (mddev->recovery || mddev->sync_thread) in set_bitmap_file()
7025 if (mddev->bitmap || mddev->bitmap_info.file) in set_bitmap_file()
7031 mdname(mddev)); in set_bitmap_file()
7038 mdname(mddev)); in set_bitmap_file()
7042 mdname(mddev)); in set_bitmap_file()
7046 mdname(mddev)); in set_bitmap_file()
7053 mddev->bitmap_info.file = f; in set_bitmap_file()
7054 mddev->bitmap_info.offset = 0; /* file overrides offset */ in set_bitmap_file()
7055 } else if (mddev->bitmap == NULL) in set_bitmap_file()
7058 if (mddev->pers) { in set_bitmap_file()
7062 bitmap = md_bitmap_create(mddev, -1); in set_bitmap_file()
7063 mddev_suspend(mddev); in set_bitmap_file()
7065 mddev->bitmap = bitmap; in set_bitmap_file()
7066 err = md_bitmap_load(mddev); in set_bitmap_file()
7070 md_bitmap_destroy(mddev); in set_bitmap_file()
7073 mddev_resume(mddev); in set_bitmap_file()
7075 mddev_suspend(mddev); in set_bitmap_file()
7076 md_bitmap_destroy(mddev); in set_bitmap_file()
7077 mddev_resume(mddev); in set_bitmap_file()
7081 struct file *f = mddev->bitmap_info.file; in set_bitmap_file()
7083 spin_lock(&mddev->lock); in set_bitmap_file()
7084 mddev->bitmap_info.file = NULL; in set_bitmap_file()
7085 spin_unlock(&mddev->lock); in set_bitmap_file()
7106 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info) in md_set_array_info() argument
7118 mddev->major_version = info->major_version; in md_set_array_info()
7119 mddev->minor_version = info->minor_version; in md_set_array_info()
7120 mddev->patch_version = info->patch_version; in md_set_array_info()
7121 mddev->persistent = !info->not_persistent; in md_set_array_info()
7125 mddev->ctime = ktime_get_real_seconds(); in md_set_array_info()
7128 mddev->major_version = MD_MAJOR_VERSION; in md_set_array_info()
7129 mddev->minor_version = MD_MINOR_VERSION; in md_set_array_info()
7130 mddev->patch_version = MD_PATCHLEVEL_VERSION; in md_set_array_info()
7131 mddev->ctime = ktime_get_real_seconds(); in md_set_array_info()
7133 mddev->level = info->level; in md_set_array_info()
7134 mddev->clevel[0] = 0; in md_set_array_info()
7135 mddev->dev_sectors = 2 * (sector_t)info->size; in md_set_array_info()
7136 mddev->raid_disks = info->raid_disks; in md_set_array_info()
7141 mddev->recovery_cp = MaxSector; in md_set_array_info()
7143 mddev->recovery_cp = 0; in md_set_array_info()
7144 mddev->persistent = ! info->not_persistent; in md_set_array_info()
7145 mddev->external = 0; in md_set_array_info()
7147 mddev->layout = info->layout; in md_set_array_info()
7148 if (mddev->level == 0) in md_set_array_info()
7150 mddev->layout = -1; in md_set_array_info()
7151 mddev->chunk_sectors = info->chunk_size >> 9; in md_set_array_info()
7153 if (mddev->persistent) { in md_set_array_info()
7154 mddev->max_disks = MD_SB_DISKS; in md_set_array_info()
7155 mddev->flags = 0; in md_set_array_info()
7156 mddev->sb_flags = 0; in md_set_array_info()
7158 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_set_array_info()
7160 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; in md_set_array_info()
7161 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); in md_set_array_info()
7162 mddev->bitmap_info.offset = 0; in md_set_array_info()
7164 mddev->reshape_position = MaxSector; in md_set_array_info()
7169 get_random_bytes(mddev->uuid, 16); in md_set_array_info()
7171 mddev->new_level = mddev->level; in md_set_array_info()
7172 mddev->new_chunk_sectors = mddev->chunk_sectors; in md_set_array_info()
7173 mddev->new_layout = mddev->layout; in md_set_array_info()
7174 mddev->delta_disks = 0; in md_set_array_info()
7175 mddev->reshape_backwards = 0; in md_set_array_info()
7180 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) in md_set_array_sectors() argument
7182 lockdep_assert_held(&mddev->reconfig_mutex); in md_set_array_sectors()
7184 if (mddev->external_size) in md_set_array_sectors()
7187 mddev->array_sectors = array_sectors; in md_set_array_sectors()
7191 static int update_size(struct mddev *mddev, sector_t num_sectors) in update_size() argument
7196 sector_t old_dev_sectors = mddev->dev_sectors; in update_size()
7198 if (mddev->pers->resize == NULL) in update_size()
7209 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in update_size()
7210 mddev->sync_thread) in update_size()
7212 if (mddev->ro) in update_size()
7215 rdev_for_each(rdev, mddev) { in update_size()
7223 rv = mddev->pers->resize(mddev, num_sectors); in update_size()
7225 if (mddev_is_clustered(mddev)) in update_size()
7226 md_cluster_ops->update_size(mddev, old_dev_sectors); in update_size()
7227 else if (mddev->queue) { in update_size()
7228 set_capacity_and_notify(mddev->gendisk, in update_size()
7229 mddev->array_sectors); in update_size()
7235 static int update_raid_disks(struct mddev *mddev, int raid_disks) in update_raid_disks() argument
7240 if (mddev->pers->check_reshape == NULL) in update_raid_disks()
7242 if (mddev->ro) in update_raid_disks()
7245 (mddev->max_disks && raid_disks >= mddev->max_disks)) in update_raid_disks()
7247 if (mddev->sync_thread || in update_raid_disks()
7248 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in update_raid_disks()
7249 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) || in update_raid_disks()
7250 mddev->reshape_position != MaxSector) in update_raid_disks()
7253 rdev_for_each(rdev, mddev) { in update_raid_disks()
7254 if (mddev->raid_disks < raid_disks && in update_raid_disks()
7257 if (mddev->raid_disks > raid_disks && in update_raid_disks()
7262 mddev->delta_disks = raid_disks - mddev->raid_disks; in update_raid_disks()
7263 if (mddev->delta_disks < 0) in update_raid_disks()
7264 mddev->reshape_backwards = 1; in update_raid_disks()
7265 else if (mddev->delta_disks > 0) in update_raid_disks()
7266 mddev->reshape_backwards = 0; in update_raid_disks()
7268 rv = mddev->pers->check_reshape(mddev); in update_raid_disks()
7270 mddev->delta_disks = 0; in update_raid_disks()
7271 mddev->reshape_backwards = 0; in update_raid_disks()
7284 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) in update_array_info() argument
7291 if (mddev->bitmap && mddev->bitmap_info.offset) in update_array_info()
7294 if (mddev->major_version != info->major_version || in update_array_info()
7295 mddev->minor_version != info->minor_version || in update_array_info()
7297 mddev->ctime != info->ctime || in update_array_info()
7298 mddev->level != info->level || in update_array_info()
7300 mddev->persistent != !info->not_persistent || in update_array_info()
7301 mddev->chunk_sectors != info->chunk_size >> 9 || in update_array_info()
7307 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) in update_array_info()
7309 if (mddev->raid_disks != info->raid_disks) in update_array_info()
7311 if (mddev->layout != info->layout) in update_array_info()
7320 if (mddev->layout != info->layout) { in update_array_info()
7325 if (mddev->pers->check_reshape == NULL) in update_array_info()
7328 mddev->new_layout = info->layout; in update_array_info()
7329 rv = mddev->pers->check_reshape(mddev); in update_array_info()
7331 mddev->new_layout = mddev->layout; in update_array_info()
7335 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) in update_array_info()
7336 rv = update_size(mddev, (sector_t)info->size * 2); in update_array_info()
7338 if (mddev->raid_disks != info->raid_disks) in update_array_info()
7339 rv = update_raid_disks(mddev, info->raid_disks); in update_array_info()
7342 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { in update_array_info()
7346 if (mddev->recovery || mddev->sync_thread) { in update_array_info()
7353 if (mddev->bitmap) { in update_array_info()
7357 if (mddev->bitmap_info.default_offset == 0) { in update_array_info()
7361 mddev->bitmap_info.offset = in update_array_info()
7362 mddev->bitmap_info.default_offset; in update_array_info()
7363 mddev->bitmap_info.space = in update_array_info()
7364 mddev->bitmap_info.default_space; in update_array_info()
7365 bitmap = md_bitmap_create(mddev, -1); in update_array_info()
7366 mddev_suspend(mddev); in update_array_info()
7368 mddev->bitmap = bitmap; in update_array_info()
7369 rv = md_bitmap_load(mddev); in update_array_info()
7373 md_bitmap_destroy(mddev); in update_array_info()
7374 mddev_resume(mddev); in update_array_info()
7377 if (!mddev->bitmap) { in update_array_info()
7381 if (mddev->bitmap->storage.file) { in update_array_info()
7385 if (mddev->bitmap_info.nodes) { in update_array_info()
7387 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { in update_array_info()
7390 md_cluster_ops->unlock_all_bitmaps(mddev); in update_array_info()
7394 mddev->bitmap_info.nodes = 0; in update_array_info()
7395 md_cluster_ops->leave(mddev); in update_array_info()
7397 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; in update_array_info()
7399 mddev_suspend(mddev); in update_array_info()
7400 md_bitmap_destroy(mddev); in update_array_info()
7401 mddev_resume(mddev); in update_array_info()
7402 mddev->bitmap_info.offset = 0; in update_array_info()
7405 md_update_sb(mddev, 1); in update_array_info()
7411 static int set_disk_faulty(struct mddev *mddev, dev_t dev) in set_disk_faulty() argument
7416 if (mddev->pers == NULL) in set_disk_faulty()
7420 rdev = md_find_rdev_rcu(mddev, dev); in set_disk_faulty()
7424 md_error(mddev, rdev); in set_disk_faulty()
7425 if (test_bit(MD_BROKEN, &mddev->flags)) in set_disk_faulty()
7440 struct mddev *mddev = bdev->bd_disk->private_data; in md_getgeo() local
7444 geo->cylinders = mddev->array_sectors / 8; in md_getgeo()
7477 struct mddev *mddev = NULL; in md_ioctl() local
7508 mddev = bdev->bd_disk->private_data; in md_ioctl()
7510 if (!mddev) { in md_ioctl()
7518 if (!mddev->raid_disks && !mddev->external) in md_ioctl()
7521 err = get_array_info(mddev, argp); in md_ioctl()
7525 if (!mddev->raid_disks && !mddev->external) in md_ioctl()
7528 err = get_disk_info(mddev, argp); in md_ioctl()
7532 err = set_disk_faulty(mddev, new_decode_dev(arg)); in md_ioctl()
7536 err = get_bitmap_file(mddev, argp); in md_ioctl()
7542 flush_rdev_wq(mddev); in md_ioctl()
7546 wait_event_interruptible_timeout(mddev->sb_wait, in md_ioctl()
7548 &mddev->recovery), in md_ioctl()
7554 mutex_lock(&mddev->open_mutex); in md_ioctl()
7555 if (mddev->pers && atomic_read(&mddev->openers) > 1) { in md_ioctl()
7556 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7560 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) { in md_ioctl()
7561 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7566 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7569 err = mddev_lock(mddev); in md_ioctl()
7584 if (mddev->pers) { in md_ioctl()
7585 err = update_array_info(mddev, &info); in md_ioctl()
7592 if (!list_empty(&mddev->disks)) { in md_ioctl()
7593 pr_warn("md: array %s already has disks!\n", mdname(mddev)); in md_ioctl()
7597 if (mddev->raid_disks) { in md_ioctl()
7598 pr_warn("md: array %s already initialised!\n", mdname(mddev)); in md_ioctl()
7602 err = md_set_array_info(mddev, &info); in md_ioctl()
7615 if ((!mddev->raid_disks && !mddev->external) in md_ioctl()
7628 err = restart_array(mddev); in md_ioctl()
7632 err = do_md_stop(mddev, 0, bdev); in md_ioctl()
7636 err = md_set_readonly(mddev, bdev); in md_ioctl()
7640 err = hot_remove_disk(mddev, new_decode_dev(arg)); in md_ioctl()
7648 if (mddev->pers) { in md_ioctl()
7656 err = md_add_new_disk(mddev, &info); in md_ioctl()
7666 if (mddev->ro && mddev->pers) { in md_ioctl()
7667 if (mddev->ro == 2) { in md_ioctl()
7668 mddev->ro = 0; in md_ioctl()
7669 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_ioctl()
7670 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_ioctl()
7675 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { in md_ioctl()
7676 mddev_unlock(mddev); in md_ioctl()
7677 wait_event(mddev->sb_wait, in md_ioctl()
7678 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && in md_ioctl()
7679 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_ioctl()
7680 mddev_lock_nointr(mddev); in md_ioctl()
7695 err = md_add_new_disk(mddev, &info); in md_ioctl()
7700 if (mddev_is_clustered(mddev)) in md_ioctl()
7701 md_cluster_ops->new_disk_ack(mddev, false); in md_ioctl()
7707 err = hot_add_disk(mddev, new_decode_dev(arg)); in md_ioctl()
7711 err = do_md_run(mddev); in md_ioctl()
7715 err = set_bitmap_file(mddev, (int)arg); in md_ioctl()
7724 if (mddev->hold_active == UNTIL_IOCTL && in md_ioctl()
7726 mddev->hold_active = 0; in md_ioctl()
7727 mddev_unlock(mddev); in md_ioctl()
7730 clear_bit(MD_CLOSING, &mddev->flags); in md_ioctl()
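md_ioctl() is the dispatcher for the legacy md ioctls (RAID_VERSION, GET_ARRAY_INFO, GET_DISK_INFO, ADD_NEW_DISK, RUN_ARRAY, STOP_ARRAY and friends). The smallest round trip through it is RAID_VERSION; a hedged example against an assumed /dev/md0:

    /* Hedged example: ask the md driver which ioctl interface version it
     * speaks (handled early in md_ioctl()). */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/major.h>
    #include <linux/raid/md_u.h>

    int main(void)
    {
        mdu_version_t ver;
        int fd = open("/dev/md0", O_RDONLY);

        if (fd < 0) { perror("/dev/md0"); return 1; }
        if (ioctl(fd, RAID_VERSION, &ver) < 0) {
            perror("RAID_VERSION");
            close(fd);
            return 1;
        }
        printf("md ioctl interface %d.%d.%d\n",
               ver.major, ver.minor, ver.patchlevel);
        close(fd);
        return 0;
    }
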
7755 struct mddev *mddev = bdev->bd_disk->private_data; in md_set_read_only() local
7758 err = mddev_lock(mddev); in md_set_read_only()
7762 if (!mddev->raid_disks && !mddev->external) { in md_set_read_only()
7771 if (!ro && mddev->ro == 1 && mddev->pers) { in md_set_read_only()
7772 err = restart_array(mddev); in md_set_read_only()
7775 mddev->ro = 2; in md_set_read_only()
7779 mddev_unlock(mddev); in md_set_read_only()
7785 struct mddev *mddev; in md_open() local
7789 mddev = mddev_get(bdev->bd_disk->private_data); in md_open()
7791 if (!mddev) in md_open()
7794 err = mutex_lock_interruptible(&mddev->open_mutex); in md_open()
7799 if (test_bit(MD_CLOSING, &mddev->flags)) in md_open()
7802 atomic_inc(&mddev->openers); in md_open()
7803 mutex_unlock(&mddev->open_mutex); in md_open()
7809 mutex_unlock(&mddev->open_mutex); in md_open()
7811 mddev_put(mddev); in md_open()
7817 struct mddev *mddev = disk->private_data; in md_release() local
7819 BUG_ON(!mddev); in md_release()
7820 atomic_dec(&mddev->openers); in md_release()
7821 mddev_put(mddev); in md_release()
7826 struct mddev *mddev = disk->private_data; in md_check_events() local
7829 if (mddev->changed) in md_check_events()
7831 mddev->changed = 0; in md_check_events()
7837 struct mddev *mddev = disk->private_data; in md_free_disk() local
7839 percpu_ref_exit(&mddev->writes_pending); in md_free_disk()
7840 bioset_exit(&mddev->bio_set); in md_free_disk()
7841 bioset_exit(&mddev->sync_set); in md_free_disk()
7843 mddev_free(mddev); in md_free_disk()
7916 struct mddev *mddev, const char *name) in md_register_thread() argument
7927 thread->mddev = mddev; in md_register_thread()
7931 mdname(thread->mddev), in md_register_thread()
7964 void md_error(struct mddev *mddev, struct md_rdev *rdev) in md_error() argument
7969 if (!mddev->pers || !mddev->pers->error_handler) in md_error()
7971 mddev->pers->error_handler(mddev, rdev); in md_error()
7973 if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags)) in md_error()
7974 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_error()
7976 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_error()
7977 if (!test_bit(MD_BROKEN, &mddev->flags)) { in md_error()
7978 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_error()
7979 md_wakeup_thread(mddev->thread); in md_error()
7981 if (mddev->event_work.func) in md_error()
7982 queue_work(md_misc_wq, &mddev->event_work); in md_error()
8006 static int status_resync(struct seq_file *seq, struct mddev *mddev) in status_resync() argument
8014 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in status_resync()
8015 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in status_resync()
8016 max_sectors = mddev->resync_max_sectors; in status_resync()
8018 max_sectors = mddev->dev_sectors; in status_resync()
8020 resync = mddev->curr_resync; in status_resync()
8022 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) in status_resync()
8028 resync -= atomic_read(&mddev->recovery_active); in status_resync()
8041 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { in status_resync()
8044 rdev_for_each(rdev, mddev) in status_resync()
8052 if (mddev->reshape_position != MaxSector) in status_resync()
8058 if (mddev->recovery_cp < MaxSector) { in status_resync()
8095 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? in status_resync()
8097 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? in status_resync()
8099 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? in status_resync()
8122 dt = ((jiffies - mddev->resync_mark) / HZ); in status_resync()
8125 curr_mark_cnt = mddev->curr_mark_cnt; in status_resync()
8126 recovery_active = atomic_read(&mddev->recovery_active); in status_resync()
8127 resync_mark_cnt = mddev->resync_mark_cnt; in status_resync()
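The last few status_resync() fragments feed the speed figure printed in /proc/mdstat: the rate is the number of sectors completed since the last mark, minus what is still in flight, divided by the elapsed seconds and halved to convert sectors to KiB. A condensed sketch of that arithmetic, assuming the usual unsigned long snapshot variables (the seq_printf format string is recalled from the source, not shown in the listing):

	/* Condensed sketch of the resync speed computation in status_resync(). */
	unsigned long dt, db = 0;
	unsigned long curr_mark_cnt, resync_mark_cnt, recovery_active;

	dt = (jiffies - mddev->resync_mark) / HZ;	/* seconds since the mark */
	if (!dt)
		dt++;					/* avoid dividing by zero */

	curr_mark_cnt   = mddev->curr_mark_cnt;			/* sectors issued so far */
	recovery_active = atomic_read(&mddev->recovery_active);	/* still in flight */
	resync_mark_cnt = mddev->resync_mark_cnt;		/* sectors at the mark */

	if (curr_mark_cnt > recovery_active + resync_mark_cnt)
		db = curr_mark_cnt - (recovery_active + resync_mark_cnt);

	seq_printf(seq, " speed=%ldK/sec", db / 2 / dt);	/* 2 sectors = 1 KiB */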
8148 struct mddev *mddev; in md_seq_start() local
8163 mddev = list_entry(tmp, struct mddev, all_mddevs); in md_seq_start()
8164 if (!mddev_get(mddev)) in md_seq_start()
8167 return mddev; in md_seq_start()
8178 struct mddev *next_mddev, *mddev = v; in md_seq_next() local
8179 struct mddev *to_put = NULL; in md_seq_next()
8189 to_put = mddev; in md_seq_next()
8190 tmp = mddev->all_mddevs.next; in md_seq_next()
8199 next_mddev = list_entry(tmp, struct mddev, all_mddevs); in md_seq_next()
8202 mddev = next_mddev; in md_seq_next()
8203 tmp = mddev->all_mddevs.next; in md_seq_next()
8208 mddev_put(mddev); in md_seq_next()
8215 struct mddev *mddev = v; in md_seq_stop() local
8217 if (mddev && v != (void*)1 && v != (void*)2) in md_seq_stop()
8218 mddev_put(mddev); in md_seq_stop()
8223 struct mddev *mddev = v; in md_seq_show() local
8244 spin_lock(&mddev->lock); in md_seq_show()
8245 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { in md_seq_show()
8246 seq_printf(seq, "%s : %sactive", mdname(mddev), in md_seq_show()
8247 mddev->pers ? "" : "in"); in md_seq_show()
8248 if (mddev->pers) { in md_seq_show()
8249 if (mddev->ro==1) in md_seq_show()
8251 if (mddev->ro==2) in md_seq_show()
8253 seq_printf(seq, " %s", mddev->pers->name); in md_seq_show()
8258 rdev_for_each_rcu(rdev, mddev) { in md_seq_show()
8277 if (!list_empty(&mddev->disks)) { in md_seq_show()
8278 if (mddev->pers) in md_seq_show()
8281 mddev->array_sectors / 2); in md_seq_show()
8286 if (mddev->persistent) { in md_seq_show()
8287 if (mddev->major_version != 0 || in md_seq_show()
8288 mddev->minor_version != 90) { in md_seq_show()
8290 mddev->major_version, in md_seq_show()
8291 mddev->minor_version); in md_seq_show()
8293 } else if (mddev->external) in md_seq_show()
8295 mddev->metadata_type); in md_seq_show()
8299 if (mddev->pers) { in md_seq_show()
8300 mddev->pers->status(seq, mddev); in md_seq_show()
8302 if (mddev->pers->sync_request) { in md_seq_show()
8303 if (status_resync(seq, mddev)) in md_seq_show()
8309 md_bitmap_status(seq, mddev->bitmap); in md_seq_show()
8313 spin_unlock(&mddev->lock); in md_seq_show()
8411 int md_setup_cluster(struct mddev *mddev, int nodes) in md_setup_cluster() argument
8425 ret = md_cluster_ops->join(mddev, nodes); in md_setup_cluster()
8427 mddev->safemode_delay = 0; in md_setup_cluster()
8431 void md_cluster_stop(struct mddev *mddev) in md_cluster_stop() argument
8435 md_cluster_ops->leave(mddev); in md_cluster_stop()
8439 static int is_mddev_idle(struct mddev *mddev, int init) in is_mddev_idle() argument
8447 rdev_for_each_rcu(rdev, mddev) { in is_mddev_idle()
8482 void md_done_sync(struct mddev *mddev, int blocks, int ok) in md_done_sync() argument
8485 atomic_sub(blocks, &mddev->recovery_active); in md_done_sync()
8486 wake_up(&mddev->recovery_wait); in md_done_sync()
8488 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_done_sync()
8489 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); in md_done_sync()
8490 md_wakeup_thread(mddev->thread); in md_done_sync()
8503 bool md_write_start(struct mddev *mddev, struct bio *bi) in md_write_start() argument
8510 BUG_ON(mddev->ro == 1); in md_write_start()
8511 if (mddev->ro == 2) { in md_write_start()
8513 mddev->ro = 0; in md_write_start()
8514 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_write_start()
8515 md_wakeup_thread(mddev->thread); in md_write_start()
8516 md_wakeup_thread(mddev->sync_thread); in md_write_start()
8520 percpu_ref_get(&mddev->writes_pending); in md_write_start()
8522 if (mddev->safemode == 1) in md_write_start()
8523 mddev->safemode = 0; in md_write_start()
8525 if (mddev->in_sync || mddev->sync_checkers) { in md_write_start()
8526 spin_lock(&mddev->lock); in md_write_start()
8527 if (mddev->in_sync) { in md_write_start()
8528 mddev->in_sync = 0; in md_write_start()
8529 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_write_start()
8530 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_write_start()
8531 md_wakeup_thread(mddev->thread); in md_write_start()
8534 spin_unlock(&mddev->lock); in md_write_start()
8538 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_write_start()
8539 if (!mddev->has_superblocks) in md_write_start()
8541 wait_event(mddev->sb_wait, in md_write_start()
8542 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || in md_write_start()
8543 mddev->suspended); in md_write_start()
8544 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in md_write_start()
8545 percpu_ref_put(&mddev->writes_pending); in md_write_start()
8560 void md_write_inc(struct mddev *mddev, struct bio *bi) in md_write_inc() argument
8564 WARN_ON_ONCE(mddev->in_sync || mddev->ro); in md_write_inc()
8565 percpu_ref_get(&mddev->writes_pending); in md_write_inc()
8569 void md_write_end(struct mddev *mddev) in md_write_end() argument
8571 percpu_ref_put(&mddev->writes_pending); in md_write_end()
8573 if (mddev->safemode == 2) in md_write_end()
8574 md_wakeup_thread(mddev->thread); in md_write_end()
8575 else if (mddev->safemode_delay) in md_write_end()
8579 mod_timer(&mddev->safemode_timer, in md_write_end()
8580 roundup(jiffies, mddev->safemode_delay) + in md_write_end()
8581 mddev->safemode_delay); in md_write_end()
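md_write_start(), md_write_inc() and md_write_end() bracket writes issued by a personality: the start call may block until the superblock has been marked dirty (and bails out if the array is suspended), the end call drops the writes_pending reference and arms the safemode timer. A hypothetical usage sketch; example_make_request() and the surrounding logic are invented for illustration and are not the code of any particular personality:

	/* Hypothetical personality write path showing how the helpers pair. */
	static bool example_make_request(struct mddev *mddev, struct bio *bio)
	{
		if (bio_data_dir(bio) == WRITE) {
			if (!md_write_start(mddev, bio))
				return false;	/* suspended: ask md to retry */
			/* ... map the bio and submit it to member devices ... */
			md_write_end(mddev);	/* usually from the write's endio */
		} else {
			/* ... handle the read ... */
		}
		return true;
	}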
8587 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, in md_submit_discard_bio() argument
8598 if (mddev->gendisk) in md_submit_discard_bio()
8600 disk_devt(mddev->gendisk), in md_submit_discard_bio()
8606 int acct_bioset_init(struct mddev *mddev) in acct_bioset_init() argument
8610 if (!bioset_initialized(&mddev->io_acct_set)) in acct_bioset_init()
8611 err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE, in acct_bioset_init()
8617 void acct_bioset_exit(struct mddev *mddev) in acct_bioset_exit() argument
8619 bioset_exit(&mddev->io_acct_set); in acct_bioset_exit()
8639 void md_account_bio(struct mddev *mddev, struct bio **bio) in md_account_bio() argument
8648 clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set); in md_account_bio()
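acct_bioset_init()/acct_bioset_exit() manage the per-array io_acct_set bioset, and md_account_bio() uses it to swap the caller's bio for a clone so I/O accounting can be completed when the clone finishes. A simplified sketch of that clone-and-swap pattern; example_end_acct() is a hypothetical completion hook, and the bi_private handling is simplified (the kernel stores an accounting struct there, not the original bio):

	/* Simplified sketch of the clone-and-swap accounting in md_account_bio(). */
	static void md_account_bio_sketch(struct mddev *mddev, struct bio **bio)
	{
		struct block_device *bdev = (*bio)->bi_bdev;
		struct bio *clone;

		if (!blk_queue_io_stat(bdev->bd_disk->queue))
			return;				/* iostats disabled */

		clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set);
		clone->bi_end_io  = example_end_acct;	/* completes the original */
		clone->bi_private = *bio;
		*bio = clone;				/* caller submits the clone */
	}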
8665 void md_allow_write(struct mddev *mddev) in md_allow_write() argument
8667 if (!mddev->pers) in md_allow_write()
8669 if (mddev->ro) in md_allow_write()
8671 if (!mddev->pers->sync_request) in md_allow_write()
8674 spin_lock(&mddev->lock); in md_allow_write()
8675 if (mddev->in_sync) { in md_allow_write()
8676 mddev->in_sync = 0; in md_allow_write()
8677 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_allow_write()
8678 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_allow_write()
8679 if (mddev->safemode_delay && in md_allow_write()
8680 mddev->safemode == 0) in md_allow_write()
8681 mddev->safemode = 1; in md_allow_write()
8682 spin_unlock(&mddev->lock); in md_allow_write()
8683 md_update_sb(mddev, 0); in md_allow_write()
8684 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_allow_write()
8686 wait_event(mddev->sb_wait, in md_allow_write()
8687 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_allow_write()
8689 spin_unlock(&mddev->lock); in md_allow_write()
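md_allow_write() is called by personalities before an operation that must be able to write (a reshape, for example): if the array is still marked in_sync it flips it to dirty, pushes the superblock out and waits until MD_SB_CHANGE_PENDING clears. A condensed sketch mirroring the fragments listed above:

	/* Condensed sketch of md_allow_write() built from the fragments above. */
	void md_allow_write_sketch(struct mddev *mddev)
	{
		if (!mddev->pers || mddev->ro || !mddev->pers->sync_request)
			return;

		spin_lock(&mddev->lock);
		if (!mddev->in_sync) {
			spin_unlock(&mddev->lock);
			return;
		}

		mddev->in_sync = 0;
		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
		if (mddev->safemode_delay && mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock(&mddev->lock);

		md_update_sb(mddev, 0);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		/* Do not return until the on-disk metadata says "dirty". */
		wait_event(mddev->sb_wait,
			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
	}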
8698 struct mddev *mddev = thread->mddev; in md_do_sync() local
8699 struct mddev *mddev2; in md_do_sync()
8714 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) || in md_do_sync()
8715 test_bit(MD_RECOVERY_WAIT, &mddev->recovery)) in md_do_sync()
8717 if (mddev->ro) {/* never try to sync a read-only array */ in md_do_sync()
8718 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_do_sync()
8722 if (mddev_is_clustered(mddev)) { in md_do_sync()
8723 ret = md_cluster_ops->resync_start(mddev); in md_do_sync()
8727 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags); in md_do_sync()
8728 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in md_do_sync()
8729 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || in md_do_sync()
8730 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) in md_do_sync()
8731 && ((unsigned long long)mddev->curr_resync_completed in md_do_sync()
8732 < (unsigned long long)mddev->resync_max_sectors)) in md_do_sync()
8736 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
8737 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { in md_do_sync()
8740 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in md_do_sync()
8745 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in md_do_sync()
8750 mddev->last_sync_action = action ?: desc; in md_do_sync()
8764 mddev->curr_resync = MD_RESYNC_DELAYED; in md_do_sync()
8767 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8773 if (mddev2 == mddev) in md_do_sync()
8775 if (!mddev->parallel_resync in md_do_sync()
8777 && match_mddev_units(mddev, mddev2)) { in md_do_sync()
8779 if (mddev < mddev2 && in md_do_sync()
8780 mddev->curr_resync == MD_RESYNC_DELAYED) { in md_do_sync()
8782 mddev->curr_resync = MD_RESYNC_YIELDED; in md_do_sync()
8785 if (mddev > mddev2 && in md_do_sync()
8786 mddev->curr_resync == MD_RESYNC_YIELDED) in md_do_sync()
8796 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
8797 mddev2->curr_resync >= mddev->curr_resync) { in md_do_sync()
8801 desc, mdname(mddev), in md_do_sync()
8816 } while (mddev->curr_resync < MD_RESYNC_DELAYED); in md_do_sync()
8819 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
8823 max_sectors = mddev->resync_max_sectors; in md_do_sync()
8824 atomic64_set(&mddev->resync_mismatches, 0); in md_do_sync()
8826 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
8827 j = mddev->resync_min; in md_do_sync()
8828 else if (!mddev->bitmap) in md_do_sync()
8829 j = mddev->recovery_cp; in md_do_sync()
8831 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in md_do_sync()
8832 max_sectors = mddev->resync_max_sectors; in md_do_sync()
8838 if (mddev_is_clustered(mddev) && in md_do_sync()
8839 mddev->reshape_position != MaxSector) in md_do_sync()
8840 j = mddev->reshape_position; in md_do_sync()
8843 max_sectors = mddev->dev_sectors; in md_do_sync()
8846 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
8863 if (mddev->bitmap) { in md_do_sync()
8864 mddev->pers->quiesce(mddev, 1); in md_do_sync()
8865 mddev->pers->quiesce(mddev, 0); in md_do_sync()
8869 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev)); in md_do_sync()
8870 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev)); in md_do_sync()
8872 speed_max(mddev), desc); in md_do_sync()
8874 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ in md_do_sync()
8882 mddev->resync_mark = mark[last_mark]; in md_do_sync()
8883 mddev->resync_mark_cnt = mark_cnt[last_mark]; in md_do_sync()
8892 atomic_set(&mddev->recovery_active, 0); in md_do_sync()
8897 desc, mdname(mddev)); in md_do_sync()
8898 mddev->curr_resync = j; in md_do_sync()
8900 mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */ in md_do_sync()
8901 mddev->curr_resync_completed = j; in md_do_sync()
8902 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
8912 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
8913 ((mddev->curr_resync > mddev->curr_resync_completed && in md_do_sync()
8914 (mddev->curr_resync - mddev->curr_resync_completed) in md_do_sync()
8917 (j - mddev->curr_resync_completed)*2 in md_do_sync()
8918 >= mddev->resync_max - mddev->curr_resync_completed || in md_do_sync()
8919 mddev->curr_resync_completed > mddev->resync_max in md_do_sync()
8922 wait_event(mddev->recovery_wait, in md_do_sync()
8923 atomic_read(&mddev->recovery_active) == 0); in md_do_sync()
8924 mddev->curr_resync_completed = j; in md_do_sync()
8925 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in md_do_sync()
8926 j > mddev->recovery_cp) in md_do_sync()
8927 mddev->recovery_cp = j; in md_do_sync()
8929 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_do_sync()
8930 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
8933 while (j >= mddev->resync_max && in md_do_sync()
8934 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
8940 wait_event_interruptible(mddev->recovery_wait, in md_do_sync()
8941 mddev->resync_max > j in md_do_sync()
8943 &mddev->recovery)); in md_do_sync()
8946 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8949 sectors = mddev->pers->sync_request(mddev, j, &skipped); in md_do_sync()
8951 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_do_sync()
8957 atomic_add(sectors, &mddev->recovery_active); in md_do_sync()
8960 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8968 mddev->curr_resync = j; in md_do_sync()
8969 mddev->curr_mark_cnt = io_sectors; in md_do_sync()
8985 mddev->resync_mark = mark[next]; in md_do_sync()
8986 mddev->resync_mark_cnt = mark_cnt[next]; in md_do_sync()
8988 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); in md_do_sync()
8992 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
9005 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); in md_do_sync()
9006 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 in md_do_sync()
9007 /((jiffies-mddev->resync_mark)/HZ +1) +1; in md_do_sync()
9009 if (currspeed > speed_min(mddev)) { in md_do_sync()
9010 if (currspeed > speed_max(mddev)) { in md_do_sync()
9014 if (!is_mddev_idle(mddev, 0)) { in md_do_sync()
9019 wait_event(mddev->recovery_wait, in md_do_sync()
9020 !atomic_read(&mddev->recovery_active)); in md_do_sync()
9024 pr_info("md: %s: %s %s.\n",mdname(mddev), desc, in md_do_sync()
9025 test_bit(MD_RECOVERY_INTR, &mddev->recovery) in md_do_sync()
9031 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); in md_do_sync()
9033 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
9034 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
9035 mddev->curr_resync >= MD_RESYNC_ACTIVE) { in md_do_sync()
9036 mddev->curr_resync_completed = mddev->curr_resync; in md_do_sync()
9037 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
9039 mddev->pers->sync_request(mddev, max_sectors, &skipped); in md_do_sync()
9041 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && in md_do_sync()
9042 mddev->curr_resync >= MD_RESYNC_ACTIVE) { in md_do_sync()
9043 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
9044 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
9045 if (mddev->curr_resync >= mddev->recovery_cp) { in md_do_sync()
9047 desc, mdname(mddev)); in md_do_sync()
9049 &mddev->recovery)) in md_do_sync()
9050 mddev->recovery_cp = in md_do_sync()
9051 mddev->curr_resync_completed; in md_do_sync()
9053 mddev->recovery_cp = in md_do_sync()
9054 mddev->curr_resync; in md_do_sync()
9057 mddev->recovery_cp = MaxSector; in md_do_sync()
9059 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
9060 mddev->curr_resync = MaxSector; in md_do_sync()
9061 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
9062 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { in md_do_sync()
9064 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
9066 mddev->delta_disks >= 0 && in md_do_sync()
9070 rdev->recovery_offset < mddev->curr_resync) in md_do_sync()
9071 rdev->recovery_offset = mddev->curr_resync; in md_do_sync()
9080 set_mask_bits(&mddev->sb_flags, 0, in md_do_sync()
9083 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
9084 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
9085 mddev->delta_disks > 0 && in md_do_sync()
9086 mddev->pers->finish_reshape && in md_do_sync()
9087 mddev->pers->size && in md_do_sync()
9088 mddev->queue) { in md_do_sync()
9089 mddev_lock_nointr(mddev); in md_do_sync()
9090 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); in md_do_sync()
9091 mddev_unlock(mddev); in md_do_sync()
9092 if (!mddev_is_clustered(mddev)) in md_do_sync()
9093 set_capacity_and_notify(mddev->gendisk, in md_do_sync()
9094 mddev->array_sectors); in md_do_sync()
9097 spin_lock(&mddev->lock); in md_do_sync()
9098 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
9100 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
9101 mddev->resync_min = 0; in md_do_sync()
9102 mddev->resync_max = MaxSector; in md_do_sync()
9103 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
9104 mddev->resync_min = mddev->curr_resync_completed; in md_do_sync()
9105 set_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_do_sync()
9106 mddev->curr_resync = MD_RESYNC_NONE; in md_do_sync()
9107 spin_unlock(&mddev->lock); in md_do_sync()
9110 md_wakeup_thread(mddev->thread); in md_do_sync()
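Near the end of md_do_sync()'s main loop the thread throttles itself: the current rate is computed from the sectors completed since the last mark, and the sync backs off whenever it exceeds speed_max, or whenever normal I/O is competing while it is above speed_min. A condensed sketch of that loop tail; it is a fragment relying on the loop's local variables and its 'repeat' label, not a standalone function:

	/* Condensed sketch of the throttling at the tail of md_do_sync()'s
	 * main loop; 'repeat' is the loop's top label. */
	recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
	currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt)) / 2
		    / ((jiffies - mddev->resync_mark) / HZ + 1) + 1;	/* KiB/s */

	if (currspeed > speed_min(mddev)) {
		if (currspeed > speed_max(mddev)) {
			msleep(500);		/* over the hard cap: back off */
			goto repeat;
		}
		if (!is_mddev_idle(mddev, 0)) {
			/* Normal I/O is active: let the in-flight resync
			 * requests drain so applications get priority. */
			wait_event(mddev->recovery_wait,
				   !atomic_read(&mddev->recovery_active));
		}
	}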
9115 static int remove_and_add_spares(struct mddev *mddev, in remove_and_add_spares() argument
9123 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in remove_and_add_spares()
9127 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9145 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9153 if (mddev->pers->hot_remove_disk( in remove_and_add_spares()
9154 mddev, rdev) == 0) { in remove_and_add_spares()
9155 sysfs_unlink_rdev(mddev, rdev); in remove_and_add_spares()
9165 if (removed && mddev->kobj.sd) in remove_and_add_spares()
9166 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in remove_and_add_spares()
9171 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9186 if (mddev->ro && in remove_and_add_spares()
9193 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) { in remove_and_add_spares()
9195 sysfs_link_rdev(mddev, rdev); in remove_and_add_spares()
9199 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in remove_and_add_spares()
9204 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in remove_and_add_spares()
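remove_and_add_spares() makes two passes over the member list: first it unplugs members the personality agrees to drop, then it plugs usable spares back in and marks the superblock for an update. A condensed sketch of the two passes; rdev_can_be_removed() and rdev_is_usable_spare() are hypothetical predicates standing in for the multi-flag tests (Faulty, Blocked, In_sync, nr_pending and so on) in the real function:

	/* Condensed sketch of the two passes in remove_and_add_spares(). */
	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk >= 0 && rdev_can_be_removed(rdev) &&
		    mddev->pers->hot_remove_disk(mddev, rdev) == 0) {
			sysfs_unlink_rdev(mddev, rdev);
			rdev->saved_raid_disk = rdev->raid_disk;
			rdev->raid_disk = -1;
			removed++;
		}
	}
	if (removed && mddev->kobj.sd)
		sysfs_notify_dirent_safe(mddev->sysfs_degraded);

	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk < 0 && rdev_is_usable_spare(rdev) &&
		    mddev->pers->hot_add_disk(mddev, rdev) == 0) {
			sysfs_link_rdev(mddev, rdev);
			spares++;
			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		}
	}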
9210 struct mddev *mddev = container_of(ws, struct mddev, del_work); in md_start_sync() local
9212 mddev->sync_thread = md_register_thread(md_do_sync, in md_start_sync()
9213 mddev, in md_start_sync()
9215 if (!mddev->sync_thread) { in md_start_sync()
9217 mdname(mddev)); in md_start_sync()
9219 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_start_sync()
9220 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_start_sync()
9221 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_start_sync()
9222 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_start_sync()
9223 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_start_sync()
9226 &mddev->recovery)) in md_start_sync()
9227 if (mddev->sysfs_action) in md_start_sync()
9228 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_start_sync()
9230 md_wakeup_thread(mddev->sync_thread); in md_start_sync()
9231 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_start_sync()
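md_start_sync() runs from a workqueue (queued as mddev->del_work by md_check_recovery()) so the sync thread is created outside the reconfiguration paths; if the thread cannot be registered, every pending recovery request is cancelled. A condensed sketch mirroring the fragments above:

	/* Condensed sketch of md_start_sync() built from the fragments above. */
	static void md_start_sync_sketch(struct work_struct *ws)
	{
		struct mddev *mddev = container_of(ws, struct mddev, del_work);

		mddev->sync_thread = md_register_thread(md_do_sync, mddev, "resync");
		if (!mddev->sync_thread) {
			/* Could not fork the thread: drop all recovery requests. */
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (mddev->sysfs_action)
				sysfs_notify_dirent_safe(mddev->sysfs_action);
			return;
		}
		md_wakeup_thread(mddev->sync_thread);
		sysfs_notify_dirent_safe(mddev->sysfs_action);
	}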
9257 void md_check_recovery(struct mddev *mddev) in md_check_recovery() argument
9259 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) { in md_check_recovery()
9263 set_bit(MD_UPDATING_SB, &mddev->flags); in md_check_recovery()
9265 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags)) in md_check_recovery()
9266 md_update_sb(mddev, 0); in md_check_recovery()
9267 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags); in md_check_recovery()
9268 wake_up(&mddev->sb_wait); in md_check_recovery()
9271 if (mddev->suspended) in md_check_recovery()
9274 if (mddev->bitmap) in md_check_recovery()
9275 md_bitmap_daemon_work(mddev); in md_check_recovery()
9278 if (mddev->pers->sync_request && !mddev->external) { in md_check_recovery()
9280 mdname(mddev)); in md_check_recovery()
9281 mddev->safemode = 2; in md_check_recovery()
9286 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) in md_check_recovery()
9289 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) || in md_check_recovery()
9290 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || in md_check_recovery()
9291 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || in md_check_recovery()
9292 (mddev->external == 0 && mddev->safemode == 1) || in md_check_recovery()
9293 (mddev->safemode == 2 in md_check_recovery()
9294 && !mddev->in_sync && mddev->recovery_cp == MaxSector) in md_check_recovery()
9298 if (mddev_trylock(mddev)) { in md_check_recovery()
9300 bool try_set_sync = mddev->safemode != 0; in md_check_recovery()
9302 if (!mddev->external && mddev->safemode == 1) in md_check_recovery()
9303 mddev->safemode = 0; in md_check_recovery()
9305 if (mddev->ro) { in md_check_recovery()
9307 if (!mddev->external && mddev->in_sync) in md_check_recovery()
9313 rdev_for_each(rdev, mddev) in md_check_recovery()
9322 remove_and_add_spares(mddev, NULL); in md_check_recovery()
9326 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_check_recovery()
9327 md_unregister_thread(&mddev->sync_thread); in md_check_recovery()
9328 md_reap_sync_thread(mddev); in md_check_recovery()
9329 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9330 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_check_recovery()
9331 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_check_recovery()
9335 if (mddev_is_clustered(mddev)) { in md_check_recovery()
9340 rdev_for_each_safe(rdev, tmp, mddev) { in md_check_recovery()
9347 if (try_set_sync && !mddev->external && !mddev->in_sync) { in md_check_recovery()
9348 spin_lock(&mddev->lock); in md_check_recovery()
9349 set_in_sync(mddev); in md_check_recovery()
9350 spin_unlock(&mddev->lock); in md_check_recovery()
9353 if (mddev->sb_flags) in md_check_recovery()
9354 md_update_sb(mddev, 0); in md_check_recovery()
9356 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in md_check_recovery()
9357 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { in md_check_recovery()
9359 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_check_recovery()
9362 if (mddev->sync_thread) { in md_check_recovery()
9363 md_unregister_thread(&mddev->sync_thread); in md_check_recovery()
9364 md_reap_sync_thread(mddev); in md_check_recovery()
9370 mddev->curr_resync_completed = 0; in md_check_recovery()
9371 spin_lock(&mddev->lock); in md_check_recovery()
9372 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_check_recovery()
9373 spin_unlock(&mddev->lock); in md_check_recovery()
9377 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_check_recovery()
9378 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_check_recovery()
9380 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || in md_check_recovery()
9381 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) in md_check_recovery()
9390 if (mddev->reshape_position != MaxSector) { in md_check_recovery()
9391 if (mddev->pers->check_reshape == NULL || in md_check_recovery()
9392 mddev->pers->check_reshape(mddev) != 0) in md_check_recovery()
9395 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_check_recovery()
9396 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9397 } else if ((spares = remove_and_add_spares(mddev, NULL))) { in md_check_recovery()
9398 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_check_recovery()
9399 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_check_recovery()
9400 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_check_recovery()
9401 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9402 } else if (mddev->recovery_cp < MaxSector) { in md_check_recovery()
9403 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_check_recovery()
9404 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9405 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) in md_check_recovery()
9409 if (mddev->pers->sync_request) { in md_check_recovery()
9415 md_bitmap_write_all(mddev->bitmap); in md_check_recovery()
9417 INIT_WORK(&mddev->del_work, md_start_sync); in md_check_recovery()
9418 queue_work(md_misc_wq, &mddev->del_work); in md_check_recovery()
9422 if (!mddev->sync_thread) { in md_check_recovery()
9423 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_check_recovery()
9426 &mddev->recovery)) in md_check_recovery()
9427 if (mddev->sysfs_action) in md_check_recovery()
9428 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_check_recovery()
9431 wake_up(&mddev->sb_wait); in md_check_recovery()
9432 mddev_unlock(mddev); in md_check_recovery()
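The last stretch of md_check_recovery() chooses exactly one kind of background work and defers thread creation to md_start_sync(). A condensed sketch of that decision ladder, taken from the fragments above (locking, the frozen/needed gate and the bail-out paths are trimmed):

	/* Condensed sketch of the recovery decision ladder in md_check_recovery(). */
	if (mddev->reshape_position != MaxSector) {
		/* An interrupted reshape wins, if the personality can resume it. */
		if (mddev->pers->check_reshape &&
		    mddev->pers->check_reshape(mddev) == 0) {
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		}
	} else if ((spares = remove_and_add_spares(mddev, NULL))) {
		/* Spares were activated: rebuild onto them. */
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	} else if (mddev->recovery_cp < MaxSector) {
		/* The array is not known clean: resync from the checkpoint. */
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	}
	/* otherwise nothing runs unless a check/repair was explicitly requested */

	if (mddev->pers->sync_request) {
		if (spares)
			md_bitmap_write_all(mddev->bitmap);
		INIT_WORK(&mddev->del_work, md_start_sync);
		queue_work(md_misc_wq, &mddev->del_work);
	}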
9437 void md_reap_sync_thread(struct mddev *mddev) in md_reap_sync_thread() argument
9440 sector_t old_dev_sectors = mddev->dev_sectors; in md_reap_sync_thread()
9444 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_reap_sync_thread()
9445 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in md_reap_sync_thread()
9446 mddev->degraded != mddev->raid_disks) { in md_reap_sync_thread()
9449 if (mddev->pers->spare_active(mddev)) { in md_reap_sync_thread()
9450 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in md_reap_sync_thread()
9451 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_reap_sync_thread()
9454 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_reap_sync_thread()
9455 mddev->pers->finish_reshape) { in md_reap_sync_thread()
9456 mddev->pers->finish_reshape(mddev); in md_reap_sync_thread()
9457 if (mddev_is_clustered(mddev)) in md_reap_sync_thread()
9464 if (!mddev->degraded) in md_reap_sync_thread()
9465 rdev_for_each(rdev, mddev) in md_reap_sync_thread()
9468 md_update_sb(mddev, 1); in md_reap_sync_thread()
9472 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) in md_reap_sync_thread()
9473 md_cluster_ops->resync_finish(mddev); in md_reap_sync_thread()
9474 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_reap_sync_thread()
9475 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_reap_sync_thread()
9476 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_reap_sync_thread()
9477 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_reap_sync_thread()
9478 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_reap_sync_thread()
9479 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_reap_sync_thread()
9485 if (mddev_is_clustered(mddev) && is_reshaped in md_reap_sync_thread()
9486 && !test_bit(MD_CLOSING, &mddev->flags)) in md_reap_sync_thread()
9487 md_cluster_ops->update_size(mddev, old_dev_sectors); in md_reap_sync_thread()
9490 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_reap_sync_thread()
9491 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_reap_sync_thread()
9492 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_reap_sync_thread()
9494 if (mddev->event_work.func) in md_reap_sync_thread()
9495 queue_work(md_misc_wq, &mddev->event_work); in md_reap_sync_thread()
9499 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_wait_for_blocked_rdev() argument
9506 rdev_dec_pending(rdev, mddev); in md_wait_for_blocked_rdev()
9510 void md_finish_reshape(struct mddev *mddev) in md_finish_reshape() argument
9515 rdev_for_each(rdev, mddev) { in md_finish_reshape()
9531 struct mddev *mddev = rdev->mddev; in rdev_set_badblocks() local
9543 set_mask_bits(&mddev->sb_flags, 0, in rdev_set_badblocks()
9545 md_wakeup_thread(rdev->mddev->thread); in rdev_set_badblocks()
9570 struct mddev *mddev, *n; in md_notify_reboot() local
9574 list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { in md_notify_reboot()
9575 if (!mddev_get(mddev)) in md_notify_reboot()
9578 if (mddev_trylock(mddev)) { in md_notify_reboot()
9579 if (mddev->pers) in md_notify_reboot()
9580 __md_stop_writes(mddev); in md_notify_reboot()
9581 if (mddev->persistent) in md_notify_reboot()
9582 mddev->safemode = 2; in md_notify_reboot()
9583 mddev_unlock(mddev); in md_notify_reboot()
9586 mddev_put(mddev); in md_notify_reboot()
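The md_notify_reboot() fragments implement the reboot notifier: on shutdown every array that can be locked without blocking is quiesced so its metadata is marked clean, and persistent arrays are pushed into safemode. A condensed sketch of the loop body (list locking and the optional delay for in-flight metadata writes are elided):

	/* Condensed sketch of the md reboot notifier loop listed above. */
	list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
		if (!mddev_get(mddev))
			continue;
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);	/* flush, mark clean */
			if (mddev->persistent)
				mddev->safemode = 2;		/* stay clean from now on */
			mddev_unlock(mddev);
		}
		mddev_put(mddev);
	}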
9659 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) in check_sb_changes() argument
9669 if (mddev->dev_sectors != le64_to_cpu(sb->size)) { in check_sb_changes()
9670 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size)); in check_sb_changes()
9674 md_bitmap_update_sb(mddev->bitmap); in check_sb_changes()
9678 rdev_for_each_safe(rdev2, tmp, mddev) { in check_sb_changes()
9704 ret = remove_and_add_spares(mddev, rdev2); in check_sb_changes()
9709 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in check_sb_changes()
9710 md_wakeup_thread(mddev->thread); in check_sb_changes()
9719 md_error(mddev, rdev2); in check_sb_changes()
9725 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) { in check_sb_changes()
9726 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); in check_sb_changes()
9735 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && in check_sb_changes()
9741 mddev->reshape_position = le64_to_cpu(sb->reshape_position); in check_sb_changes()
9742 if (mddev->pers->update_reshape_pos) in check_sb_changes()
9743 mddev->pers->update_reshape_pos(mddev); in check_sb_changes()
9744 if (mddev->pers->start_reshape) in check_sb_changes()
9745 mddev->pers->start_reshape(mddev); in check_sb_changes()
9746 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && in check_sb_changes()
9747 mddev->reshape_position != MaxSector && in check_sb_changes()
9750 mddev->reshape_position = MaxSector; in check_sb_changes()
9751 if (mddev->pers->update_reshape_pos) in check_sb_changes()
9752 mddev->pers->update_reshape_pos(mddev); in check_sb_changes()
9756 mddev->events = le64_to_cpu(sb->events); in check_sb_changes()
9759 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) in read_rdev() argument
9773 err = super_types[mddev->major_version]. in read_rdev()
9774 load_super(rdev, NULL, mddev->minor_version); in read_rdev()
9799 mddev->pers->spare_active(mddev)) in read_rdev()
9800 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in read_rdev()
9806 void md_reload_sb(struct mddev *mddev, int nr) in md_reload_sb() argument
9812 rdev_for_each_rcu(iter, mddev) { in md_reload_sb()
9824 err = read_rdev(mddev, rdev); in md_reload_sb()
9828 check_sb_changes(mddev, rdev); in md_reload_sb()
9831 rdev_for_each_rcu(rdev, mddev) { in md_reload_sb()
9833 read_rdev(mddev, rdev); in md_reload_sb()
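md_reload_sb() is the cluster-md path for picking up metadata a peer node changed: locate the member whose descriptor number the peer announced, re-read its superblock, apply the differences through check_sb_changes(), then refresh the remaining members. A condensed sketch mirroring the fragments above (the Faulty skip test in the second loop is recalled from the source, not shown in the listing):

	/* Condensed sketch of md_reload_sb() built from the fragments above. */
	void md_reload_sb_sketch(struct mddev *mddev, int nr)
	{
		struct md_rdev *rdev = NULL, *iter;

		/* Find the member the cluster peer told us about. */
		rdev_for_each_rcu(iter, mddev) {
			if (iter->desc_nr == nr) {
				rdev = iter;
				break;
			}
		}
		if (!rdev)
			return;

		if (read_rdev(mddev, rdev) < 0)
			return;

		check_sb_changes(mddev, rdev);

		/* Other members may also carry updated metadata. */
		rdev_for_each_rcu(rdev, mddev) {
			if (!test_bit(Faulty, &rdev->flags))
				read_rdev(mddev, rdev);
		}
	}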
9909 struct mddev *mddev, *n; in md_exit() local
9930 list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { in md_exit()
9931 if (!mddev_get(mddev)) in md_exit()
9934 export_array(mddev); in md_exit()
9935 mddev->ctime = 0; in md_exit()
9936 mddev->hold_active = 0; in md_exit()
9942 mddev_put(mddev); in md_exit()