Lines matching refs: mddev

198 md_wakeup_thread(conf->mddev->thread); in raid5_wakeup_stripe_thread()
271 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
277 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
359 md_wakeup_thread(conf->mddev->thread); in release_inactive_stripe_list()
405 if (unlikely(!conf->mddev->thread) || in raid5_release_stripe()
410 md_wakeup_thread(conf->mddev->thread); in raid5_release_stripe()
748 int degraded = conf->mddev->degraded; in has_failed()
750 if (test_bit(MD_BROKEN, &conf->mddev->flags)) in has_failed()
753 if (conf->mddev->reshape_position != MaxSector) in has_failed()
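The three has_failed() hits above cover essentially the whole helper. A minimal sketch consistent with those fragments (not a verbatim copy of the source):

static int has_failed(struct r5conf *conf)
{
	int degraded = conf->mddev->degraded;

	/* An array already marked broken has failed by definition. */
	if (test_bit(MD_BROKEN, &conf->mddev->flags))
		return 1;

	/* During a reshape, recompute degraded across old and new geometry. */
	if (conf->mddev->reshape_position != MaxSector)
		degraded = raid5_calc_degraded(conf);

	return degraded > conf->max_degraded;
}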
984 md_wakeup_thread(conf->mddev->thread); in stripe_add_to_batch_list()
1223 if (!conf->mddev->external && in ops_run_io()
1224 conf->mddev->sb_flags) { in ops_run_io()
1229 md_check_recovery(conf->mddev); in ops_run_io()
1237 md_wait_for_blocked_rdev(rdev, conf->mddev); in ops_run_io()
1240 rdev_dec_pending(rdev, conf->mddev); in ops_run_io()
1299 if (conf->mddev->gendisk) in ops_run_io()
1301 disk_devt(conf->mddev->gendisk), in ops_run_io()
1346 if (conf->mddev->gendisk) in ops_run_io()
1348 disk_devt(conf->mddev->gendisk), in ops_run_io()
2426 if (conf->mddev->gendisk) in grow_stripes()
2428 "raid%d-%s", conf->level, mdname(conf->mddev)); in grow_stripes()
2431 "raid%d-%p", conf->level, conf->mddev); in grow_stripes()
2503 mddev_suspend(conf->mddev); in resize_chunks()
2517 mddev_resume(conf->mddev); in resize_chunks()
2558 md_allow_write(conf->mddev); in resize_stripes()
2742 static struct md_rdev *rdev_mdlock_deref(struct mddev *mddev, in rdev_mdlock_deref() argument
2746 lockdep_is_held(&mddev->reconfig_mutex)); in rdev_mdlock_deref()
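The two rdev_mdlock_deref() hits show the pattern used throughout this file for dereferencing RCU-protected rdev pointers while the reconfig mutex is held. A sketch of the helper; the rdev parameter is filled in as an assumption, since the listing only shows the lines that match mddev:

static struct md_rdev *rdev_mdlock_deref(struct mddev *mddev,
					 struct md_rdev __rcu *rdev)
{
	/* No rcu_read_lock() needed: reconfig_mutex pins the pointer. */
	return rcu_dereference_protected(rdev,
			lockdep_is_held(&mddev->reconfig_mutex));
}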
2791 mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf), in raid5_end_read_request()
2819 mdname(conf->mddev), in raid5_end_read_request()
2822 else if (conf->mddev->degraded >= conf->max_degraded) { in raid5_end_read_request()
2826 mdname(conf->mddev), in raid5_end_read_request()
2834 mdname(conf->mddev), in raid5_end_read_request()
2841 mdname(conf->mddev), in raid5_end_read_request()
2845 mdname(conf->mddev), rdev->bdev); in raid5_end_read_request()
2867 md_error(conf->mddev, rdev); in raid5_end_read_request()
2870 rdev_dec_pending(rdev, conf->mddev); in raid5_end_read_request()
2915 md_error(conf->mddev, rdev); in raid5_end_write_request()
2927 &rdev->mddev->recovery); in raid5_end_write_request()
2940 rdev_dec_pending(rdev, conf->mddev); in raid5_end_write_request()
2955 static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) in raid5_error() argument
2957 struct r5conf *conf = mddev->private; in raid5_error()
2962 mdname(mddev), rdev->bdev); in raid5_error()
2967 mddev->degraded = raid5_calc_degraded(conf); in raid5_error()
2970 set_bit(MD_BROKEN, &conf->mddev->flags); in raid5_error()
2971 conf->recovery_disabled = mddev->recovery_disabled; in raid5_error()
2974 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_error()
2977 mdname(mddev), conf->raid_disks - mddev->degraded); in raid5_error()
2981 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid5_error()
2984 set_mask_bits(&mddev->sb_flags, 0, in raid5_error()
2986 r5c_update_on_rdev_error(mddev, rdev); in raid5_error()
3309 mdname(conf->mddev)); in raid5_compute_blocknr()
3566 md_write_inc(conf->mddev, bi); in __add_stripe_bio()
3587 if (conf->mddev->bitmap && firstwrite) { in __add_stripe_bio()
3602 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector, in __add_stripe_bio()
3677 md_error(conf->mddev, rdev); in handle_failed_stripe()
3678 rdev_dec_pending(rdev, conf->mddev); in handle_failed_stripe()
3699 md_write_end(conf->mddev); in handle_failed_stripe()
3704 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3720 md_write_end(conf->mddev); in handle_failed_stripe()
3750 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3762 md_wakeup_thread(conf->mddev->thread); in handle_failed_stripe()
3785 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { in handle_failed_sync()
3809 conf->mddev->recovery_disabled; in handle_failed_sync()
3811 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), !abort); in handle_failed_sync()
3825 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
3918 sh->sector < sh->raid_conf->mddev->recovery_cp) in need_this_block()
4098 md_write_end(conf->mddev); in handle_stripe_clean_event()
4102 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
4159 md_wakeup_thread(conf->mddev->thread); in handle_stripe_clean_event()
4186 sector_t recovery_cp = conf->mddev->recovery_cp; in handle_stripe_dirtying()
4237 if (conf->mddev->queue) in handle_stripe_dirtying()
4238 blk_add_trace_msg(conf->mddev->queue, in handle_stripe_dirtying()
4317 if (rcw && conf->mddev->queue) in handle_stripe_dirtying()
4318 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", in handle_stripe_dirtying()
4406 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches); in handle_parity_checks5()
4407 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks5()
4411 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks5()
4533 mdname(conf->mddev), in handle_parity_checks6()
4571 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches); in handle_parity_checks6()
4572 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks6()
4576 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks6()
4873 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4874 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) in analyse_stripe()
5041 test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { in handle_stripe()
5053 rdev_dec_pending(s.blocked_rdev, conf->mddev); in handle_stripe()
5241 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); in handle_stripe()
5250 if (s.failed <= conf->max_degraded && !conf->mddev->ro) in handle_stripe()
5308 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); in handle_stripe()
5318 if (conf->mddev->external) in handle_stripe()
5320 conf->mddev); in handle_stripe()
5327 conf->mddev); in handle_stripe()
5339 md_error(conf->mddev, rdev); in handle_stripe()
5340 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5346 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5355 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5372 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
5413 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) in in_chunk_boundary() argument
5415 struct r5conf *conf = mddev->private; in in_chunk_boundary()
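in_chunk_boundary() is the gate for chunk_aligned_read(): a read may bypass the stripe cache only if it fits entirely inside one chunk. Its body is mostly not visible in this listing; the check is roughly the following sketch, using the smaller of the current and previous chunk size so it stays valid during a reshape:

static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
	struct r5conf *conf = mddev->private;
	sector_t sector = bio->bi_iter.bi_sector;
	unsigned int chunk_sectors;
	unsigned int bio_sectors = bio_sectors(bio);

	chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
	/* Offset within the chunk plus the bio length must not cross it. */
	return chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
}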
5439 md_wakeup_thread(conf->mddev->thread); in add_bio_to_retry()
5473 struct mddev *mddev; in raid5_align_endio() local
5483 mddev = rdev->mddev; in raid5_align_endio()
5484 conf = mddev->private; in raid5_align_endio()
5486 rdev_dec_pending(rdev, conf->mddev); in raid5_align_endio()
5502 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) in raid5_read_one_chunk() argument
5504 struct r5conf *conf = mddev->private; in raid5_read_one_chunk()
5512 if (!in_chunk_boundary(mddev, raid_bio)) { in raid5_read_one_chunk()
5542 rdev_dec_pending(rdev, mddev); in raid5_read_one_chunk()
5547 &mddev->io_acct_set); in raid5_read_one_chunk()
5580 if (mddev->gendisk) in raid5_read_one_chunk()
5581 trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk), in raid5_read_one_chunk()
5591 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) in chunk_aligned_read() argument
5595 unsigned chunk_sects = mddev->chunk_sectors; in chunk_aligned_read()
5599 struct r5conf *conf = mddev->private; in chunk_aligned_read()
5606 if (!raid5_read_one_chunk(mddev, raid_bio)) in chunk_aligned_read()
5726 struct mddev *mddev = cb->cb.data; in raid5_unplug() local
5727 struct r5conf *conf = mddev->private; in raid5_unplug()
5755 if (mddev->queue) in raid5_unplug()
5756 trace_block_unplug(mddev->queue, cnt, !from_schedule); in raid5_unplug()
5760 static void release_stripe_plug(struct mddev *mddev, in release_stripe_plug() argument
5764 raid5_unplug, mddev, in release_stripe_plug()
5788 static void make_discard_request(struct mddev *mddev, struct bio *bi) in make_discard_request() argument
5790 struct r5conf *conf = mddev->private; in make_discard_request()
5799 if (mddev->reshape_position != MaxSector) in make_discard_request()
5853 md_write_inc(mddev, bi); in make_discard_request()
5857 if (conf->mddev->bitmap) { in make_discard_request()
5861 md_bitmap_startwrite(mddev->bitmap, in make_discard_request()
5873 release_stripe_plug(mddev, sh); in make_discard_request()
5879 static bool ahead_of_reshape(struct mddev *mddev, sector_t sector, in ahead_of_reshape() argument
5882 return mddev->reshape_backwards ? sector < reshape_sector : in ahead_of_reshape()
5886 static bool range_ahead_of_reshape(struct mddev *mddev, sector_t min, in range_ahead_of_reshape() argument
5889 return mddev->reshape_backwards ? max < reshape_sector : in range_ahead_of_reshape()
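The two reshape helpers above are truncated at the matching line; the ternaries likely complete as in the sketch below (not a verbatim copy). The sense of "ahead" flips with mddev->reshape_backwards because the reshape front moves toward lower sectors in that case:

static bool ahead_of_reshape(struct mddev *mddev, sector_t sector,
			     sector_t reshape_sector)
{
	return mddev->reshape_backwards ? sector < reshape_sector :
					  sector >= reshape_sector;
}

static bool range_ahead_of_reshape(struct mddev *mddev, sector_t min,
				   sector_t max, sector_t reshape_sector)
{
	/* The whole [min, max] range must be ahead of the reshape front. */
	return mddev->reshape_backwards ? max < reshape_sector :
					  min >= reshape_sector;
}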
5893 static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf, in stripe_ahead_of_reshape() argument
5910 if (!range_ahead_of_reshape(mddev, min_sector, max_sector, in stripe_ahead_of_reshape()
5969 static enum stripe_result make_stripe_request(struct mddev *mddev, in make_stripe_request() argument
5993 if (ahead_of_reshape(mddev, logical_sector, in make_stripe_request()
5997 if (ahead_of_reshape(mddev, logical_sector, in make_stripe_request()
6023 stripe_ahead_of_reshape(mddev, conf, sh)) { in make_stripe_request()
6048 md_wakeup_thread(mddev->thread); in make_stripe_request()
6074 release_stripe_plug(mddev, sh); in make_stripe_request()
6082 static bool raid5_make_request(struct mddev *mddev, struct bio * bi) in raid5_make_request() argument
6085 struct r5conf *conf = mddev->private; in raid5_make_request()
6098 if (md_flush_request(mddev, bi)) in raid5_make_request()
6109 if (!md_write_start(mddev, bi)) in raid5_make_request()
6116 if (rw == READ && mddev->degraded == 0 && in raid5_make_request()
6117 mddev->reshape_position == MaxSector) { in raid5_make_request()
6118 bi = chunk_aligned_read(mddev, bi); in raid5_make_request()
6124 make_discard_request(mddev, bi); in raid5_make_request()
6125 md_write_end(mddev); in raid5_make_request()
6144 !ahead_of_reshape(mddev, logical_sector, conf->reshape_progress) && in raid5_make_request()
6145 ahead_of_reshape(mddev, logical_sector, conf->reshape_safe)) { in raid5_make_request()
6148 md_write_end(mddev); in raid5_make_request()
6151 md_account_bio(mddev, &bi); in raid5_make_request()
6155 res = make_stripe_request(mddev, conf, &ctx, logical_sector, in raid5_make_request()
6194 md_write_end(mddev); in raid5_make_request()
6199 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
6201 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) in reshape_request() argument
6212 struct r5conf *conf = mddev->private; in reshape_request()
6229 if (mddev->reshape_backwards && in reshape_request()
6230 conf->reshape_progress < raid5_size(mddev, 0, 0)) { in reshape_request()
6231 sector_nr = raid5_size(mddev, 0, 0) in reshape_request()
6233 } else if (mddev->reshape_backwards && in reshape_request()
6237 } else if (!mddev->reshape_backwards && in reshape_request()
6242 mddev->curr_resync_completed = sector_nr; in reshape_request()
6243 sysfs_notify_dirent_safe(mddev->sysfs_completed); in reshape_request()
6269 if (mddev->reshape_backwards) { in reshape_request()
6287 if (mddev->reshape_backwards) { in reshape_request()
6290 BUG_ON((mddev->dev_sectors & in reshape_request()
6325 if ((mddev->reshape_backwards in reshape_request()
6332 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
6335 mddev->reshape_position = conf->reshape_progress; in reshape_request()
6336 mddev->curr_resync_completed = sector_nr; in reshape_request()
6337 if (!mddev->reshape_backwards) in reshape_request()
6339 rdev_for_each(rdev, mddev) in reshape_request()
6347 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in reshape_request()
6348 md_wakeup_thread(mddev->thread); in reshape_request()
6349 wait_event(mddev->sb_wait, mddev->sb_flags == 0 || in reshape_request()
6350 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
6351 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in reshape_request()
6354 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6357 sysfs_notify_dirent_safe(mddev->sysfs_completed); in reshape_request()
6379 if (s < raid5_size(mddev, 0, 0)) { in reshape_request()
6394 if (mddev->reshape_backwards) in reshape_request()
6411 if (last_sector >= mddev->dev_sectors) in reshape_request()
6412 last_sector = mddev->dev_sectors - 1; in reshape_request()
6435 if (mddev->curr_resync_completed > mddev->resync_max || in reshape_request()
6436 (sector_nr - mddev->curr_resync_completed) * 2 in reshape_request()
6437 >= mddev->resync_max - mddev->curr_resync_completed) { in reshape_request()
6441 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
6444 mddev->reshape_position = conf->reshape_progress; in reshape_request()
6445 mddev->curr_resync_completed = sector_nr; in reshape_request()
6446 if (!mddev->reshape_backwards) in reshape_request()
6448 rdev_for_each(rdev, mddev) in reshape_request()
6455 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in reshape_request()
6456 md_wakeup_thread(mddev->thread); in reshape_request()
6457 wait_event(mddev->sb_wait, in reshape_request()
6458 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) in reshape_request()
6459 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
6460 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in reshape_request()
6463 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6466 sysfs_notify_dirent_safe(mddev->sysfs_completed); in reshape_request()
6472 static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr, in raid5_sync_request() argument
6475 struct r5conf *conf = mddev->private; in raid5_sync_request()
6477 sector_t max_sector = mddev->dev_sectors; in raid5_sync_request()
6485 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in raid5_sync_request()
6490 if (mddev->curr_resync < max_sector) /* aborted */ in raid5_sync_request()
6491 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in raid5_sync_request()
6495 md_bitmap_close_sync(mddev->bitmap); in raid5_sync_request()
6503 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid5_sync_request()
6504 return reshape_request(mddev, sector_nr, skipped); in raid5_sync_request()
6516 if (mddev->degraded >= conf->max_degraded && in raid5_sync_request()
6517 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid5_sync_request()
6518 sector_t rv = mddev->dev_sectors - sector_nr; in raid5_sync_request()
6522 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in raid5_sync_request()
6524 !md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && in raid5_sync_request()
6533 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false); in raid5_sync_request()
6557 md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); in raid5_sync_request()
6685 struct mddev *mddev = conf->mddev; in raid5_do_work() local
6706 wait_event_lock_irq(mddev->sb_wait, in raid5_do_work()
6707 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), in raid5_do_work()
6733 struct mddev *mddev = thread->mddev; in raid5d() local
6734 struct r5conf *conf = mddev->private; in raid5d()
6740 md_check_recovery(mddev); in raid5d()
6759 md_bitmap_unplug(mddev->bitmap); in raid5d()
6782 if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) { in raid5d()
6784 md_check_recovery(mddev); in raid5d()
6795 wait_event_lock_irq(mddev->sb_wait, in raid5d()
6796 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags), in raid5d()
6823 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) in raid5_show_stripe_cache_size() argument
6827 spin_lock(&mddev->lock); in raid5_show_stripe_cache_size()
6828 conf = mddev->private; in raid5_show_stripe_cache_size()
6831 spin_unlock(&mddev->lock); in raid5_show_stripe_cache_size()
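The stripe_cache_size show routine above is the template every sysfs show handler in this listing follows: take mddev->lock, re-read mddev->private under the lock (the conf can disappear on array stop), format, unlock. A sketch, assuming it reports conf->min_nr_stripes:

static ssize_t
raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
{
	struct r5conf *conf;
	int ret = 0;

	spin_lock(&mddev->lock);
	conf = mddev->private;
	if (conf)
		ret = sprintf(page, "%d\n", conf->min_nr_stripes);
	spin_unlock(&mddev->lock);
	return ret;
}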
6836 raid5_set_cache_size(struct mddev *mddev, int size) in raid5_set_cache_size() argument
6839 struct r5conf *conf = mddev->private; in raid5_set_cache_size()
6851 md_allow_write(mddev); in raid5_set_cache_size()
6867 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) in raid5_store_stripe_cache_size() argument
6877 err = mddev_lock(mddev); in raid5_store_stripe_cache_size()
6880 conf = mddev->private; in raid5_store_stripe_cache_size()
6884 err = raid5_set_cache_size(mddev, new); in raid5_store_stripe_cache_size()
6885 mddev_unlock(mddev); in raid5_store_stripe_cache_size()
6896 raid5_show_rmw_level(struct mddev *mddev, char *page) in raid5_show_rmw_level() argument
6898 struct r5conf *conf = mddev->private; in raid5_show_rmw_level()
6906 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len) in raid5_store_rmw_level() argument
6908 struct r5conf *conf = mddev->private; in raid5_store_rmw_level()
6938 raid5_show_stripe_size(struct mddev *mddev, char *page) in raid5_show_stripe_size() argument
6943 spin_lock(&mddev->lock); in raid5_show_stripe_size()
6944 conf = mddev->private; in raid5_show_stripe_size()
6947 spin_unlock(&mddev->lock); in raid5_show_stripe_size()
6953 raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len) in raid5_store_stripe_size() argument
6975 err = mddev_lock(mddev); in raid5_store_stripe_size()
6979 conf = mddev->private; in raid5_store_stripe_size()
6991 if (mddev->sync_thread || in raid5_store_stripe_size()
6992 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in raid5_store_stripe_size()
6993 mddev->reshape_position != MaxSector || in raid5_store_stripe_size()
6994 mddev->sysfs_active) { in raid5_store_stripe_size()
6999 mddev_suspend(mddev); in raid5_store_stripe_size()
7010 mdname(mddev)); in raid5_store_stripe_size()
7014 mddev_resume(mddev); in raid5_store_stripe_size()
7017 mddev_unlock(mddev); in raid5_store_stripe_size()
7033 raid5_show_preread_threshold(struct mddev *mddev, char *page) in raid5_show_preread_threshold() argument
7037 spin_lock(&mddev->lock); in raid5_show_preread_threshold()
7038 conf = mddev->private; in raid5_show_preread_threshold()
7041 spin_unlock(&mddev->lock); in raid5_show_preread_threshold()
7046 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) in raid5_store_preread_threshold() argument
7057 err = mddev_lock(mddev); in raid5_store_preread_threshold()
7060 conf = mddev->private; in raid5_store_preread_threshold()
7067 mddev_unlock(mddev); in raid5_store_preread_threshold()
7078 raid5_show_skip_copy(struct mddev *mddev, char *page) in raid5_show_skip_copy() argument
7082 spin_lock(&mddev->lock); in raid5_show_skip_copy()
7083 conf = mddev->private; in raid5_show_skip_copy()
7086 spin_unlock(&mddev->lock); in raid5_show_skip_copy()
7091 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) in raid5_store_skip_copy() argument
7103 err = mddev_lock(mddev); in raid5_store_skip_copy()
7106 conf = mddev->private; in raid5_store_skip_copy()
7110 struct request_queue *q = mddev->queue; in raid5_store_skip_copy()
7112 mddev_suspend(mddev); in raid5_store_skip_copy()
7118 mddev_resume(mddev); in raid5_store_skip_copy()
7120 mddev_unlock(mddev); in raid5_store_skip_copy()
7130 stripe_cache_active_show(struct mddev *mddev, char *page) in stripe_cache_active_show() argument
7132 struct r5conf *conf = mddev->private; in stripe_cache_active_show()
7143 raid5_show_group_thread_cnt(struct mddev *mddev, char *page) in raid5_show_group_thread_cnt() argument
7147 spin_lock(&mddev->lock); in raid5_show_group_thread_cnt()
7148 conf = mddev->private; in raid5_show_group_thread_cnt()
7151 spin_unlock(&mddev->lock); in raid5_show_group_thread_cnt()
7159 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) in raid5_store_group_thread_cnt() argument
7175 err = mddev_lock(mddev); in raid5_store_group_thread_cnt()
7178 conf = mddev->private; in raid5_store_group_thread_cnt()
7182 mddev_suspend(mddev); in raid5_store_group_thread_cnt()
7200 mddev_resume(mddev); in raid5_store_group_thread_cnt()
7202 mddev_unlock(mddev); in raid5_store_group_thread_cnt()
7283 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid5_size() argument
7285 struct r5conf *conf = mddev->private; in raid5_size()
7288 sectors = mddev->dev_sectors; in raid5_size()
7428 static struct r5conf *setup_conf(struct mddev *mddev) in setup_conf() argument
7440 if (mddev->new_level != 5 in setup_conf()
7441 && mddev->new_level != 4 in setup_conf()
7442 && mddev->new_level != 6) { in setup_conf()
7444 mdname(mddev), mddev->new_level); in setup_conf()
7447 if ((mddev->new_level == 5 in setup_conf()
7448 && !algorithm_valid_raid5(mddev->new_layout)) || in setup_conf()
7449 (mddev->new_level == 6 in setup_conf()
7450 && !algorithm_valid_raid6(mddev->new_layout))) { in setup_conf()
7452 mdname(mddev), mddev->new_layout); in setup_conf()
7455 if (mddev->new_level == 6 && mddev->raid_disks < 4) { in setup_conf()
7457 mdname(mddev), mddev->raid_disks); in setup_conf()
7461 if (!mddev->new_chunk_sectors || in setup_conf()
7462 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || in setup_conf()
7463 !is_power_of_2(mddev->new_chunk_sectors)) { in setup_conf()
7465 mdname(mddev), mddev->new_chunk_sectors << 9); in setup_conf()
7512 rdev_for_each(rdev, mddev) { in setup_conf()
7522 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
7524 conf->raid_disks = mddev->raid_disks; in setup_conf()
7525 if (mddev->reshape_position == MaxSector) in setup_conf()
7526 conf->previous_raid_disks = mddev->raid_disks; in setup_conf()
7528 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; in setup_conf()
7546 conf->mddev = mddev; in setup_conf()
7575 conf->level = mddev->new_level; in setup_conf()
7576 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
7581 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); in setup_conf()
7584 rdev_for_each(rdev, mddev) { in setup_conf()
7603 mdname(mddev), rdev->bdev, raid_disk); in setup_conf()
7609 conf->level = mddev->new_level; in setup_conf()
7620 conf->algorithm = mddev->new_layout; in setup_conf()
7621 conf->reshape_progress = mddev->reshape_position; in setup_conf()
7623 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
7624 conf->prev_algo = mddev->layout; in setup_conf()
7631 if (mddev->reshape_position != MaxSector) { in setup_conf()
7633 ((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4, in setup_conf()
7634 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4); in setup_conf()
7638 mdname(mddev), conf->min_nr_stripes); in setup_conf()
7645 mdname(mddev), memory); in setup_conf()
7649 pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory); in setup_conf()
7660 ret = register_shrinker(&conf->shrinker, "md-raid5:%s", mdname(mddev)); in setup_conf()
7663 mdname(mddev)); in setup_conf()
7667 sprintf(pers_name, "raid%d", mddev->new_level); in setup_conf()
7668 conf->thread = md_register_thread(raid5d, mddev, pers_name); in setup_conf()
7671 mdname(mddev)); in setup_conf()
7712 blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) * in raid5_set_io_opt()
7716 static int raid5_run(struct mddev *mddev) in raid5_run() argument
7728 if (acct_bioset_init(mddev)) { in raid5_run()
7729 pr_err("md/raid456:%s: alloc acct bioset failed.\n", mdname(mddev)); in raid5_run()
7733 if (mddev_init_writes_pending(mddev) < 0) { in raid5_run()
7738 if (mddev->recovery_cp != MaxSector) in raid5_run()
7740 mdname(mddev)); in raid5_run()
7742 rdev_for_each(rdev, mddev) { in raid5_run()
7755 } else if (mddev->reshape_backwards && in raid5_run()
7758 else if (!mddev->reshape_backwards && in raid5_run()
7763 if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) && in raid5_run()
7764 (mddev->bitmap_info.offset || mddev->bitmap_info.file)) { in raid5_run()
7766 mdname(mddev)); in raid5_run()
7771 if (mddev->reshape_position != MaxSector) { in raid5_run()
7786 int max_degraded = (mddev->level == 6 ? 2 : 1); in raid5_run()
7792 mdname(mddev)); in raid5_run()
7797 if (mddev->new_level != mddev->level) { in raid5_run()
7799 mdname(mddev)); in raid5_run()
7803 old_disks = mddev->raid_disks - mddev->delta_disks; in raid5_run()
7811 here_new = mddev->reshape_position; in raid5_run()
7812 chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); in raid5_run()
7813 new_data_disks = mddev->raid_disks - max_degraded; in raid5_run()
7816 mdname(mddev)); in raid5_run()
7822 here_old = mddev->reshape_position; in raid5_run()
7826 if (mddev->delta_disks == 0) { in raid5_run()
7834 if (abs(min_offset_diff) >= mddev->chunk_sectors && in raid5_run()
7835 abs(min_offset_diff) >= mddev->new_chunk_sectors) in raid5_run()
7837 else if (mddev->ro == 0) { in raid5_run()
7839 mdname(mddev)); in raid5_run()
7843 } else if (mddev->reshape_backwards in raid5_run()
7850 mdname(mddev)); in raid5_run()
7854 pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev)); in raid5_run()
7857 BUG_ON(mddev->level != mddev->new_level); in raid5_run()
7858 BUG_ON(mddev->layout != mddev->new_layout); in raid5_run()
7859 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); in raid5_run()
7860 BUG_ON(mddev->delta_disks != 0); in raid5_run()
7863 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && in raid5_run()
7864 test_bit(MD_HAS_PPL, &mddev->flags)) { in raid5_run()
7866 mdname(mddev)); in raid5_run()
7867 clear_bit(MD_HAS_PPL, &mddev->flags); in raid5_run()
7868 clear_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags); in raid5_run()
7871 if (mddev->private == NULL) in raid5_run()
7872 conf = setup_conf(mddev); in raid5_run()
7874 conf = mddev->private; in raid5_run()
7881 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { in raid5_run()
7884 mdname(mddev)); in raid5_run()
7885 mddev->ro = 1; in raid5_run()
7886 set_disk_ro(mddev->gendisk, 1); in raid5_run()
7887 } else if (mddev->recovery_cp == MaxSector) in raid5_run()
7888 set_bit(MD_JOURNAL_CLEAN, &mddev->flags); in raid5_run()
7892 mddev->thread = conf->thread; in raid5_run()
7894 mddev->private = conf; in raid5_run()
7898 rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev); in raid5_run()
7901 rdev = rdev_mdlock_deref(mddev, in raid5_run()
7928 if (mddev->major_version == 0 && in raid5_run()
7929 mddev->minor_version > 90) in raid5_run()
7951 mddev->degraded = raid5_calc_degraded(conf); in raid5_run()
7955 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_run()
7960 mddev->dev_sectors &= ~((sector_t)mddev->chunk_sectors - 1); in raid5_run()
7961 mddev->resync_max_sectors = mddev->dev_sectors; in raid5_run()
7963 if (mddev->degraded > dirty_parity_disks && in raid5_run()
7964 mddev->recovery_cp != MaxSector) { in raid5_run()
7965 if (test_bit(MD_HAS_PPL, &mddev->flags)) in raid5_run()
7967 mdname(mddev)); in raid5_run()
7968 else if (mddev->ok_start_degraded) in raid5_run()
7970 mdname(mddev)); in raid5_run()
7973 mdname(mddev)); in raid5_run()
7979 mdname(mddev), conf->level, in raid5_run()
7980 mddev->raid_disks-mddev->degraded, mddev->raid_disks, in raid5_run()
7981 mddev->new_layout); in raid5_run()
7988 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid5_run()
7989 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid5_run()
7990 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid5_run()
7991 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid5_run()
7992 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid5_run()
7994 if (!mddev->sync_thread) in raid5_run()
7999 if (mddev->to_remove == &raid5_attrs_group) in raid5_run()
8000 mddev->to_remove = NULL; in raid5_run()
8001 else if (mddev->kobj.sd && in raid5_run()
8002 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) in raid5_run()
8004 mdname(mddev)); in raid5_run()
8005 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); in raid5_run()
8007 if (mddev->queue) { in raid5_run()
8015 ((mddev->chunk_sectors << 9) / PAGE_SIZE); in raid5_run()
8017 chunk_size = mddev->chunk_sectors << 9; in raid5_run()
8018 blk_queue_io_min(mddev->queue, chunk_size); in raid5_run()
8020 mddev->queue->limits.raid_partial_stripes_expensive = 1; in raid5_run()
8027 mddev->queue->limits.discard_granularity = stripe; in raid5_run()
8029 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); in raid5_run()
8031 rdev_for_each(rdev, mddev) { in raid5_run()
8032 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid5_run()
8034 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid5_run()
8054 mddev->queue->limits.max_discard_sectors < (stripe >> 9) || in raid5_run()
8055 mddev->queue->limits.discard_granularity < stripe) in raid5_run()
8056 blk_queue_max_discard_sectors(mddev->queue, 0); in raid5_run()
8062 blk_queue_max_hw_sectors(mddev->queue, in raid5_run()
8066 blk_queue_max_segments(mddev->queue, USHRT_MAX); in raid5_run()
8074 md_unregister_thread(&mddev->thread); in raid5_run()
8077 mddev->private = NULL; in raid5_run()
8078 pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev)); in raid5_run()
8081 acct_bioset_exit(mddev); in raid5_run()
8085 static void raid5_free(struct mddev *mddev, void *priv) in raid5_free() argument
8090 acct_bioset_exit(mddev); in raid5_free()
8091 mddev->to_remove = &raid5_attrs_group; in raid5_free()
8094 static void raid5_status(struct seq_file *seq, struct mddev *mddev) in raid5_status() argument
8096 struct r5conf *conf = mddev->private; in raid5_status()
8099 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, in raid5_status()
8100 conf->chunk_sectors / 2, mddev->layout); in raid5_status()
8101 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); in raid5_status()
8123 conf->raid_disks - conf->mddev->degraded); in print_raid5_conf()
8136 static int raid5_spare_active(struct mddev *mddev) in raid5_spare_active() argument
8139 struct r5conf *conf = mddev->private; in raid5_spare_active()
8145 rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev); in raid5_spare_active()
8146 replacement = rdev_mdlock_deref(mddev, in raid5_spare_active()
8175 mddev->degraded = raid5_calc_degraded(conf); in raid5_spare_active()
8181 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid5_remove_disk() argument
8183 struct r5conf *conf = mddev->private; in raid5_remove_disk()
8229 mddev->recovery_disabled != conf->recovery_disabled && in raid5_remove_disk()
8239 lockdep_assert_held(&mddev->reconfig_mutex); in raid5_remove_disk()
8274 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid5_add_disk() argument
8276 struct r5conf *conf = mddev->private; in raid5_add_disk()
8303 if (mddev->recovery_disabled == conf->recovery_disabled) in raid5_add_disk()
8338 tmp = rdev_mdlock_deref(mddev, p->rdev); in raid5_add_disk()
8355 static int raid5_resize(struct mddev *mddev, sector_t sectors) in raid5_resize() argument
8365 struct r5conf *conf = mddev->private; in raid5_resize()
8370 newsize = raid5_size(mddev, sectors, mddev->raid_disks); in raid5_resize()
8371 if (mddev->external_size && in raid5_resize()
8372 mddev->array_sectors > newsize) in raid5_resize()
8374 if (mddev->bitmap) { in raid5_resize()
8375 int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0); in raid5_resize()
8379 md_set_array_sectors(mddev, newsize); in raid5_resize()
8380 if (sectors > mddev->dev_sectors && in raid5_resize()
8381 mddev->recovery_cp > mddev->dev_sectors) { in raid5_resize()
8382 mddev->recovery_cp = mddev->dev_sectors; in raid5_resize()
8383 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid5_resize()
8385 mddev->dev_sectors = sectors; in raid5_resize()
8386 mddev->resync_max_sectors = sectors; in raid5_resize()
8390 static int check_stripe_cache(struct mddev *mddev) in check_stripe_cache() argument
8400 struct r5conf *conf = mddev->private; in check_stripe_cache()
8401 if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 in check_stripe_cache()
8403 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 in check_stripe_cache()
8406 mdname(mddev), in check_stripe_cache()
8407 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) in check_stripe_cache()
8414 static int check_reshape(struct mddev *mddev) in check_reshape() argument
8416 struct r5conf *conf = mddev->private; in check_reshape()
8420 if (mddev->delta_disks == 0 && in check_reshape()
8421 mddev->new_layout == mddev->layout && in check_reshape()
8422 mddev->new_chunk_sectors == mddev->chunk_sectors) in check_reshape()
8426 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) { in check_reshape()
8433 if (mddev->level == 6) in check_reshape()
8435 if (mddev->raid_disks + mddev->delta_disks < min) in check_reshape()
8439 if (!check_stripe_cache(mddev)) in check_reshape()
8442 if (mddev->new_chunk_sectors > mddev->chunk_sectors || in check_reshape()
8443 mddev->delta_disks > 0) in check_reshape()
8446 + max(0, mddev->delta_disks), in check_reshape()
8447 max(mddev->new_chunk_sectors, in check_reshape()
8448 mddev->chunk_sectors) in check_reshape()
8452 if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size) in check_reshape()
8455 + mddev->delta_disks)); in check_reshape()
8458 static int raid5_start_reshape(struct mddev *mddev) in raid5_start_reshape() argument
8460 struct r5conf *conf = mddev->private; in raid5_start_reshape()
8465 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in raid5_start_reshape()
8468 if (!check_stripe_cache(mddev)) in raid5_start_reshape()
8474 rdev_for_each(rdev, mddev) { in raid5_start_reshape()
8480 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) in raid5_start_reshape()
8490 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) in raid5_start_reshape()
8491 < mddev->array_sectors) { in raid5_start_reshape()
8493 mdname(mddev)); in raid5_start_reshape()
8501 conf->raid_disks += mddev->delta_disks; in raid5_start_reshape()
8503 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
8505 conf->algorithm = mddev->new_layout; in raid5_start_reshape()
8511 if (mddev->reshape_backwards) in raid5_start_reshape()
8512 conf->reshape_progress = raid5_size(mddev, 0, 0); in raid5_start_reshape()
8523 mddev_suspend(mddev); in raid5_start_reshape()
8524 mddev_resume(mddev); in raid5_start_reshape()
8533 if (mddev->delta_disks >= 0) { in raid5_start_reshape()
8534 rdev_for_each(rdev, mddev) in raid5_start_reshape()
8537 if (raid5_add_disk(mddev, rdev) == 0) { in raid5_start_reshape()
8545 sysfs_link_rdev(mddev, rdev); in raid5_start_reshape()
8558 mddev->degraded = raid5_calc_degraded(conf); in raid5_start_reshape()
8561 mddev->raid_disks = conf->raid_disks; in raid5_start_reshape()
8562 mddev->reshape_position = conf->reshape_progress; in raid5_start_reshape()
8563 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid5_start_reshape()
8565 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid5_start_reshape()
8566 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid5_start_reshape()
8567 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in raid5_start_reshape()
8568 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid5_start_reshape()
8569 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid5_start_reshape()
8570 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid5_start_reshape()
8572 if (!mddev->sync_thread) { in raid5_start_reshape()
8573 mddev->recovery = 0; in raid5_start_reshape()
8576 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; in raid5_start_reshape()
8577 mddev->new_chunk_sectors = in raid5_start_reshape()
8579 mddev->new_layout = conf->algorithm = conf->prev_algo; in raid5_start_reshape()
8580 rdev_for_each(rdev, mddev) in raid5_start_reshape()
8585 mddev->reshape_position = MaxSector; in raid5_start_reshape()
8591 md_wakeup_thread(mddev->sync_thread); in raid5_start_reshape()
8602 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in end_reshape()
8607 md_finish_reshape(conf->mddev); in end_reshape()
8610 conf->mddev->reshape_position = MaxSector; in end_reshape()
8611 rdev_for_each(rdev, conf->mddev) in end_reshape()
8619 if (conf->mddev->queue) in end_reshape()
8627 static void raid5_finish_reshape(struct mddev *mddev) in raid5_finish_reshape() argument
8629 struct r5conf *conf = mddev->private; in raid5_finish_reshape()
8632 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in raid5_finish_reshape()
8634 if (mddev->delta_disks <= 0) { in raid5_finish_reshape()
8637 mddev->degraded = raid5_calc_degraded(conf); in raid5_finish_reshape()
8640 d < conf->raid_disks - mddev->delta_disks; in raid5_finish_reshape()
8642 rdev = rdev_mdlock_deref(mddev, in raid5_finish_reshape()
8646 rdev = rdev_mdlock_deref(mddev, in raid5_finish_reshape()
8652 mddev->layout = conf->algorithm; in raid5_finish_reshape()
8653 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
8654 mddev->reshape_position = MaxSector; in raid5_finish_reshape()
8655 mddev->delta_disks = 0; in raid5_finish_reshape()
8656 mddev->reshape_backwards = 0; in raid5_finish_reshape()
8660 static void raid5_quiesce(struct mddev *mddev, int quiesce) in raid5_quiesce() argument
8662 struct r5conf *conf = mddev->private; in raid5_quiesce()
8695 static void *raid45_takeover_raid0(struct mddev *mddev, int level) in raid45_takeover_raid0() argument
8697 struct r0conf *raid0_conf = mddev->private; in raid45_takeover_raid0()
8703 mdname(mddev)); in raid45_takeover_raid0()
8709 mddev->dev_sectors = sectors; in raid45_takeover_raid0()
8710 mddev->new_level = level; in raid45_takeover_raid0()
8711 mddev->new_layout = ALGORITHM_PARITY_N; in raid45_takeover_raid0()
8712 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid45_takeover_raid0()
8713 mddev->raid_disks += 1; in raid45_takeover_raid0()
8714 mddev->delta_disks = 1; in raid45_takeover_raid0()
8716 mddev->recovery_cp = MaxSector; in raid45_takeover_raid0()
8718 return setup_conf(mddev); in raid45_takeover_raid0()
8721 static void *raid5_takeover_raid1(struct mddev *mddev) in raid5_takeover_raid1() argument
8726 if (mddev->raid_disks != 2 || in raid5_takeover_raid1()
8727 mddev->degraded > 1) in raid5_takeover_raid1()
8735 while (chunksect && (mddev->array_sectors & (chunksect-1))) in raid5_takeover_raid1()
8738 if ((chunksect<<9) < RAID5_STRIPE_SIZE((struct r5conf *)mddev->private)) in raid5_takeover_raid1()
8742 mddev->new_level = 5; in raid5_takeover_raid1()
8743 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; in raid5_takeover_raid1()
8744 mddev->new_chunk_sectors = chunksect; in raid5_takeover_raid1()
8746 ret = setup_conf(mddev); in raid5_takeover_raid1()
8748 mddev_clear_unsupported_flags(mddev, in raid5_takeover_raid1()
8753 static void *raid5_takeover_raid6(struct mddev *mddev) in raid5_takeover_raid6() argument
8757 switch (mddev->layout) { in raid5_takeover_raid6()
8779 mddev->new_level = 5; in raid5_takeover_raid6()
8780 mddev->new_layout = new_layout; in raid5_takeover_raid6()
8781 mddev->delta_disks = -1; in raid5_takeover_raid6()
8782 mddev->raid_disks -= 1; in raid5_takeover_raid6()
8783 return setup_conf(mddev); in raid5_takeover_raid6()
8786 static int raid5_check_reshape(struct mddev *mddev) in raid5_check_reshape() argument
8793 struct r5conf *conf = mddev->private; in raid5_check_reshape()
8794 int new_chunk = mddev->new_chunk_sectors; in raid5_check_reshape()
8796 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) in raid5_check_reshape()
8803 if (mddev->array_sectors & (new_chunk-1)) in raid5_check_reshape()
8810 if (mddev->raid_disks == 2) { in raid5_check_reshape()
8812 if (mddev->new_layout >= 0) { in raid5_check_reshape()
8813 conf->algorithm = mddev->new_layout; in raid5_check_reshape()
8814 mddev->layout = mddev->new_layout; in raid5_check_reshape()
8818 mddev->chunk_sectors = new_chunk; in raid5_check_reshape()
8820 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid5_check_reshape()
8821 md_wakeup_thread(mddev->thread); in raid5_check_reshape()
8823 return check_reshape(mddev); in raid5_check_reshape()
8826 static int raid6_check_reshape(struct mddev *mddev) in raid6_check_reshape() argument
8828 int new_chunk = mddev->new_chunk_sectors; in raid6_check_reshape()
8830 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) in raid6_check_reshape()
8837 if (mddev->array_sectors & (new_chunk-1)) in raid6_check_reshape()
8843 return check_reshape(mddev); in raid6_check_reshape()
8846 static void *raid5_takeover(struct mddev *mddev) in raid5_takeover() argument
8854 if (mddev->level == 0) in raid5_takeover()
8855 return raid45_takeover_raid0(mddev, 5); in raid5_takeover()
8856 if (mddev->level == 1) in raid5_takeover()
8857 return raid5_takeover_raid1(mddev); in raid5_takeover()
8858 if (mddev->level == 4) { in raid5_takeover()
8859 mddev->new_layout = ALGORITHM_PARITY_N; in raid5_takeover()
8860 mddev->new_level = 5; in raid5_takeover()
8861 return setup_conf(mddev); in raid5_takeover()
8863 if (mddev->level == 6) in raid5_takeover()
8864 return raid5_takeover_raid6(mddev); in raid5_takeover()
8869 static void *raid4_takeover(struct mddev *mddev) in raid4_takeover() argument
8875 if (mddev->level == 0) in raid4_takeover()
8876 return raid45_takeover_raid0(mddev, 4); in raid4_takeover()
8877 if (mddev->level == 5 && in raid4_takeover()
8878 mddev->layout == ALGORITHM_PARITY_N) { in raid4_takeover()
8879 mddev->new_layout = 0; in raid4_takeover()
8880 mddev->new_level = 4; in raid4_takeover()
8881 return setup_conf(mddev); in raid4_takeover()
8888 static void *raid6_takeover(struct mddev *mddev) in raid6_takeover() argument
8896 if (mddev->pers != &raid5_personality) in raid6_takeover()
8898 if (mddev->degraded > 1) in raid6_takeover()
8900 if (mddev->raid_disks > 253) in raid6_takeover()
8902 if (mddev->raid_disks < 3) in raid6_takeover()
8905 switch (mddev->layout) { in raid6_takeover()
8927 mddev->new_level = 6; in raid6_takeover()
8928 mddev->new_layout = new_layout; in raid6_takeover()
8929 mddev->delta_disks = 1; in raid6_takeover()
8930 mddev->raid_disks += 1; in raid6_takeover()
8931 return setup_conf(mddev); in raid6_takeover()
8934 static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf) in raid5_change_consistency_policy() argument
8939 err = mddev_lock(mddev); in raid5_change_consistency_policy()
8942 conf = mddev->private; in raid5_change_consistency_policy()
8944 mddev_unlock(mddev); in raid5_change_consistency_policy()
8955 mddev_suspend(mddev); in raid5_change_consistency_policy()
8957 mddev_resume(mddev); in raid5_change_consistency_policy()
8964 mddev_suspend(mddev); in raid5_change_consistency_policy()
8966 mddev_resume(mddev); in raid5_change_consistency_policy()
8968 } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) && in raid5_change_consistency_policy()
8973 rdev_for_each(rdev, mddev) in raid5_change_consistency_policy()
8980 mddev_suspend(mddev); in raid5_change_consistency_policy()
8981 clear_bit(MD_HAS_JOURNAL, &mddev->flags); in raid5_change_consistency_policy()
8982 mddev_resume(mddev); in raid5_change_consistency_policy()
8992 md_update_sb(mddev, 1); in raid5_change_consistency_policy()
8994 mddev_unlock(mddev); in raid5_change_consistency_policy()
8999 static int raid5_start(struct mddev *mddev) in raid5_start() argument
9001 struct r5conf *conf = mddev->private; in raid5_start()