Lines matching refs:mddev in drivers/md/raid10.c (Linux md RAID10 driver). Each entry gives the source line number, the matching code, and the enclosing function; "argument" and "local" mark hits where mddev is a function parameter or local variable.
71 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
73 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
144 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in r10buf_pool_alloc()
145 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in r10buf_pool_alloc()
191 &conf->mddev->recovery)) { in r10buf_pool_alloc()
277 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio()
285 struct r10conf *conf = r10_bio->mddev->private; in put_buf()
301 struct mddev *mddev = r10_bio->mddev; in reschedule_retry() local
302 struct r10conf *conf = mddev->private; in reschedule_retry()
312 md_wakeup_thread(mddev->thread); in reschedule_retry()
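
The three hits above show the pattern that recurs throughout this listing: every struct r10bio carries a back-pointer to its owning mddev, and the raid10 state is always recovered through mddev->private. A minimal sketch of reschedule_retry()'s use of it (the retry-list field names are quoted from memory of the driver and should be treated as assumptions):

	static void sketch_reschedule_retry(struct r10bio *r10_bio)
	{
		unsigned long flags;
		struct mddev *mddev = r10_bio->mddev;	/* back-pointer, set at allocation */
		struct r10conf *conf = mddev->private;	/* per-array raid10 state */

		spin_lock_irqsave(&conf->device_lock, flags);
		list_add(&r10_bio->retry_list, &conf->retry_list);	/* assumed field names */
		conf->nr_queued++;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		md_wakeup_thread(mddev->thread);	/* raid10d (line 3089) drains the list */
	}
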
323 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io()
345 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos()
384 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request()
416 rdev_dec_pending(rdev, conf->mddev); in raid10_end_read_request()
422 mdname(conf->mddev), in raid10_end_read_request()
433 md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, in close_write()
437 md_write_end(r10_bio->mddev); in close_write()
460 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request()
485 md_error(rdev->mddev, rdev); in raid10_end_write_request()
490 &rdev->mddev->recovery); in raid10_end_write_request()
495 md_error(rdev->mddev, rdev); in raid10_end_write_request()
559 rdev_dec_pending(rdev, conf->mddev); in raid10_end_write_request()
658 conf->mddev->reshape_backwards)) { in raid10_find_phys()
763 if ((conf->mddev->recovery_cp < MaxSector in read_balance()
765 (mddev_is_clustered(conf->mddev) && in read_balance()
766 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, in read_balance()
907 md_bitmap_unplug(conf->mddev->bitmap); in flush_pending_writes()
999 if (conf->mddev->thread->tsk == current && in stop_waiting_barrier()
1000 test_bit(MD_RECOVERY_RUNNING, &conf->mddev->recovery) && in stop_waiting_barrier()
1038 raid10_log(conf->mddev, "wait barrier"); in wait_barrier()
1096 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) || in choose_data_offset()
1106 struct mddev *mddev = plug->cb.data; in raid10_unplug() local
1107 struct r10conf *conf = mddev->private; in raid10_unplug()
1115 md_wakeup_thread(mddev->thread); in raid10_unplug()
1122 md_bitmap_unplug(mddev->bitmap); in raid10_unplug()
1149 static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf, in regular_request_wait() argument
1157 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in regular_request_wait()
1165 raid10_log(conf->mddev, "wait reshape"); in regular_request_wait()
1175 static void raid10_read_request(struct mddev *mddev, struct bio *bio, in raid10_read_request() argument
1178 struct r10conf *conf = mddev->private; in raid10_read_request()
1217 if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) in raid10_read_request()
1223 mdname(mddev), b, in raid10_read_request()
1231 mdname(mddev), in raid10_read_request()
1249 read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set); in raid10_read_request()
1263 if (mddev->gendisk) in raid10_read_request()
1264 trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk), in raid10_read_request()
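
Hits 1249 and 1263-1264 are the read fast path: the incoming bio is cloned against the chosen mirror's block device out of the array's own bio_set, pointed at the per-device address, and resubmitted. A condensed sketch (r10bio bookkeeping and error handling omitted; 'slot' and 'gfp' are locals of the surrounding function):

	struct bio *read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);

	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
				      choose_data_offset(r10_bio, rdev);
	read_bio->bi_private = r10_bio;
	read_bio->bi_end_io = raid10_end_read_request;	/* completion handler, line 384 */

	if (mddev->gendisk)	/* emit the blktrace remap event */
		trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
				      r10_bio->sector);
	submit_bio_noacct(read_bio);
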
1270 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, in raid10_write_one_disk() argument
1280 struct r10conf *conf = mddev->private; in raid10_write_one_disk()
1295 mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set); in raid10_write_one_disk()
1311 if (conf->mddev->gendisk) in raid10_write_one_disk()
1312 trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk), in raid10_write_one_disk()
1319 cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug)); in raid10_write_one_disk()
1330 md_wakeup_thread(mddev->thread); in raid10_write_one_disk()
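
Hits 1106-1122 and 1319-1330 are the two halves of raid10's write plugging: raid10_write_one_disk() tries to park each mirror write on the current task's blk_plug, and raid10_unplug() later flushes the batch, unplugging the bitmap first so dirty bits reach stable storage before the data writes. A sketch of the pairing (struct raid10_plug_cb is paraphrased from the driver; the from_schedule handling is elided):

	struct raid10_plug_cb {
		struct blk_plug_cb	cb;
		struct bio_list		pending;	/* assumed field names */
	};

	static void sketch_unplug(struct blk_plug_cb *cb, bool from_schedule)
	{
		struct raid10_plug_cb *plug =
			container_of(cb, struct raid10_plug_cb, cb);
		struct mddev *mddev = plug->cb.data;	/* hit 1106 */
		struct bio *bio;

		md_bitmap_unplug(mddev->bitmap);	/* hit 1122: bitmap first */
		while ((bio = bio_list_pop(&plug->pending)))
			submit_bio_noacct(bio);
		kfree(plug);
	}

	static void sketch_queue_write(struct mddev *mddev, struct bio *mbio)
	{
		struct blk_plug_cb *cb = blk_check_plugged(sketch_unplug, mddev,
					sizeof(struct raid10_plug_cb));	/* hit 1319 */
		if (cb) {
			bio_list_add(&container_of(cb, struct raid10_plug_cb,
						   cb)->pending, mbio);
		} else {
			/* no plug active: the driver queues on conf->pending_bio_list */
			md_wakeup_thread(mddev->thread);	/* hit 1330 */
		}
	}
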
1334 static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio) in wait_blocked_dev() argument
1337 struct r10conf *conf = mddev->private; in wait_blocked_dev()
1392 raid10_log(conf->mddev, "%s wait rdev %d blocked", in wait_blocked_dev()
1394 md_wait_for_blocked_rdev(blocked_rdev, mddev); in wait_blocked_dev()
1400 static void raid10_write_request(struct mddev *mddev, struct bio *bio, in raid10_write_request() argument
1403 struct r10conf *conf = mddev->private; in raid10_write_request()
1408 if ((mddev_is_clustered(mddev) && in raid10_write_request()
1409 md_cluster_ops->area_resyncing(mddev, WRITE, in raid10_write_request()
1421 if (!md_cluster_ops->area_resyncing(mddev, WRITE, in raid10_write_request()
1430 if (!regular_request_wait(mddev, conf, bio, sectors)) in raid10_write_request()
1432 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in raid10_write_request()
1433 (mddev->reshape_backwards in raid10_write_request()
1439 mddev->reshape_position = conf->reshape_progress; in raid10_write_request()
1440 set_mask_bits(&mddev->sb_flags, 0, in raid10_write_request()
1442 md_wakeup_thread(mddev->thread); in raid10_write_request()
1448 raid10_log(conf->mddev, "wait reshape metadata"); in raid10_write_request()
1449 wait_event(mddev->sb_wait, in raid10_write_request()
1450 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in raid10_write_request()
1452 conf->reshape_safe = mddev->reshape_position; in raid10_write_request()
1468 wait_blocked_dev(mddev, r10_bio); in raid10_write_request()
1552 md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in raid10_write_request()
1556 raid10_write_one_disk(mddev, r10_bio, bio, false, i); in raid10_write_request()
1558 raid10_write_one_disk(mddev, r10_bio, bio, true, i); in raid10_write_request()
1563 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors) in __make_request() argument
1565 struct r10conf *conf = mddev->private; in __make_request()
1573 r10_bio->mddev = mddev; in __make_request()
1581 raid10_read_request(mddev, bio, r10_bio); in __make_request()
1583 raid10_write_request(mddev, bio, r10_bio); in __make_request()
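
Hits 1563-1583 are the top of the regular I/O path. __make_request() allocates an r10bio, stamps the mddev back-pointer (hit 1573) that every helper above relies on, and fans out by direction; a close paraphrase (the mempool field name is an assumption):

	static void sketch_make_request(struct mddev *mddev, struct bio *bio, int sectors)
	{
		struct r10conf *conf = mddev->private;
		struct r10bio *r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);

		r10_bio->master_bio = bio;
		r10_bio->sectors = sectors;
		r10_bio->mddev = mddev;		/* hit 1573 */
		r10_bio->sector = bio->bi_iter.bi_sector;

		if (bio_data_dir(bio) == READ)
			raid10_read_request(mddev, bio, r10_bio);	/* hit 1581 */
		else
			raid10_write_request(mddev, bio, r10_bio);	/* hit 1583 */
	}
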
1588 struct r10conf *conf = r10bio->mddev->private; in raid_end_discard_bio()
1600 md_write_end(r10bio->mddev); in raid_end_discard_bio()
1611 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_discard_request()
1636 rdev_dec_pending(rdev, conf->mddev); in raid10_end_discard_request()
1645 static int raid10_handle_discard(struct mddev *mddev, struct bio *bio) in raid10_handle_discard() argument
1647 struct r10conf *conf = mddev->private; in raid10_handle_discard()
1666 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid10_handle_discard()
1679 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid10_handle_discard()
1757 r10_bio->mddev = mddev; in raid10_handle_discard()
1761 wait_blocked_dev(mddev, r10_bio); in raid10_handle_discard()
1828 dev_start = (first_stripe_index + 1) * mddev->chunk_sectors; in raid10_handle_discard()
1830 dev_start = first_stripe_index * mddev->chunk_sectors; in raid10_handle_discard()
1835 dev_end = (last_stripe_index + 1) * mddev->chunk_sectors; in raid10_handle_discard()
1837 dev_end = last_stripe_index * mddev->chunk_sectors; in raid10_handle_discard()
1850 &mddev->bio_set); in raid10_handle_discard()
1856 md_submit_discard_bio(mddev, rdev, mbio, in raid10_handle_discard()
1864 &mddev->bio_set); in raid10_handle_discard()
1870 md_submit_discard_bio(mddev, rrdev, rbio, in raid10_handle_discard()
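
Hits 1828-1837 carry the per-device arithmetic at the heart of raid10_handle_discard(): each disk's share of a large discard is a whole-chunk range, shifted by one chunk at either end depending on where the disk sits relative to the first and last partially covered stripe. A hedged reconstruction (the comparison structure and the *_disk_index/*_disk_offset names are recalled, not confirmed by the hits; only the four formulas are):

	if (disk < start_disk_index)
		dev_start = (first_stripe_index + 1) * mddev->chunk_sectors; /* hit 1828 */
	else if (disk > start_disk_index)
		dev_start = first_stripe_index * mddev->chunk_sectors;	/* hit 1830 */
	else
		dev_start = start_disk_offset;	/* exact offset inside the chunk */

	if (disk < end_disk_index)
		dev_end = (last_stripe_index + 1) * mddev->chunk_sectors; /* hit 1835 */
	else if (disk > end_disk_index)
		dev_end = last_stripe_index * mddev->chunk_sectors;	/* hit 1837 */
	else
		dev_end = end_disk_offset;
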
1896 static bool raid10_make_request(struct mddev *mddev, struct bio *bio) in raid10_make_request() argument
1898 struct r10conf *conf = mddev->private; in raid10_make_request()
1904 && md_flush_request(mddev, bio)) in raid10_make_request()
1907 if (!md_write_start(mddev, bio)) in raid10_make_request()
1911 if (!raid10_handle_discard(mddev, bio)) in raid10_make_request()
1926 __make_request(mddev, bio, sectors); in raid10_make_request()
1933 static void raid10_status(struct seq_file *seq, struct mddev *mddev) in raid10_status() argument
1935 struct r10conf *conf = mddev->private; in raid10_status()
1939 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2); in raid10_status()
1951 conf->geo.raid_disks - mddev->degraded); in raid10_status()
2028 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) in raid10_error() argument
2030 struct r10conf *conf = mddev->private; in raid10_error()
2036 set_bit(MD_BROKEN, &mddev->flags); in raid10_error()
2038 if (!mddev->fail_last_dev) { in raid10_error()
2044 mddev->degraded++; in raid10_error()
2046 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid10_error()
2049 set_mask_bits(&mddev->sb_flags, 0, in raid10_error()
2054 mdname(mddev), rdev->bdev, in raid10_error()
2055 mdname(mddev), conf->geo.raid_disks - mddev->degraded); in raid10_error()
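
Hits 2028-2055 trace raid10_error()'s failure bookkeeping: if losing this rdev would leave data unreachable, the array is marked MD_BROKEN (and the failure is refused unless fail_last_dev is set); otherwise the disk is failed, the degraded count bumped, any resync interrupted, and a superblock update queued. In outline (enough() is a real helper in this driver, but the surrounding device_lock handling is elided):

	static void sketch_raid10_error(struct mddev *mddev, struct md_rdev *rdev)
	{
		struct r10conf *conf = mddev->private;

		if (test_bit(In_sync, &rdev->flags) && !enough(conf, rdev->raid_disk)) {
			set_bit(MD_BROKEN, &mddev->flags);	/* hit 2036 */
			if (!mddev->fail_last_dev)		/* hit 2038 */
				return;
		}
		if (test_and_clear_bit(In_sync, &rdev->flags))
			mddev->degraded++;			/* hit 2044 */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);	/* hit 2046 */
		set_bit(Faulty, &rdev->flags);
		set_mask_bits(&mddev->sb_flags, 0,		/* hit 2049 */
			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
	}
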
2068 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, in print_conf()
2091 static int raid10_spare_active(struct mddev *mddev) in raid10_spare_active() argument
2094 struct r10conf *conf = mddev->private; in raid10_spare_active()
2132 mddev->degraded -= count; in raid10_spare_active()
2139 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid10_add_disk() argument
2141 struct r10conf *conf = mddev->private; in raid10_add_disk()
2147 if (mddev->recovery_cp < MaxSector) in raid10_add_disk()
2155 if (md_integrity_add_rdev(rdev, mddev)) in raid10_add_disk()
2169 if (p->recovery_disabled == mddev->recovery_disabled) in raid10_add_disk()
2179 if (mddev->gendisk) in raid10_add_disk()
2180 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_add_disk()
2187 if (mddev->gendisk) in raid10_add_disk()
2188 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_add_disk()
2192 p->recovery_disabled = mddev->recovery_disabled - 1; in raid10_add_disk()
2205 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid10_remove_disk() argument
2207 struct r10conf *conf = mddev->private; in raid10_remove_disk()
2214 if (unlikely(number >= mddev->raid_disks)) in raid10_remove_disk()
2233 mddev->recovery_disabled != p->recovery_disabled && in raid10_remove_disk()
2261 err = md_integrity_register(mddev); in raid10_remove_disk()
2271 struct r10conf *conf = r10_bio->mddev->private; in __end_sync_read()
2285 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); in __end_sync_read()
2298 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read()
2314 struct mddev *mddev = r10_bio->mddev; in end_sync_request() local
2325 md_done_sync(mddev, s, 1); in end_sync_request()
2342 struct mddev *mddev = r10_bio->mddev; in end_sync_write() local
2343 struct r10conf *conf = mddev->private; in end_sync_write()
2359 md_error(mddev, rdev); in end_sync_write()
2364 &rdev->mddev->recovery); in end_sync_write()
2373 rdev_dec_pending(rdev, mddev); in end_sync_write()
2394 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) in sync_request_write() argument
2396 struct r10conf *conf = mddev->private; in sync_request_write()
2453 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
2454 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) in sync_request_write()
2459 md_error(rdev->mddev, rdev); in sync_request_write()
2510 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
2534 struct mddev *mddev = r10_bio->mddev; in fix_recovery_read_error() local
2535 struct r10conf *conf = mddev->private; in fix_recovery_read_error()
2573 &rdev->mddev->recovery); in fix_recovery_read_error()
2591 mdname(mddev)); in fix_recovery_read_error()
2594 = mddev->recovery_disabled; in fix_recovery_read_error()
2596 &mddev->recovery); in fix_recovery_read_error()
2608 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) in recovery_request_write() argument
2610 struct r10conf *conf = mddev->private; in recovery_request_write()
2652 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) in check_decay_read_errors() argument
2698 &rdev->mddev->recovery); in r10_sync_page_io()
2702 md_error(rdev->mddev, rdev); in r10_sync_page_io()
2714 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2719 int max_read_errors = atomic_read(&mddev->max_corr_read_errors); in fix_read_error()
2732 check_decay_read_errors(mddev, rdev); in fix_read_error()
2736 mdname(mddev), rdev->bdev, in fix_read_error()
2739 mdname(mddev), rdev->bdev); in fix_read_error()
2740 md_error(mddev, rdev); in fix_read_error()
2774 rdev_dec_pending(rdev, mddev); in fix_read_error()
2798 md_error(mddev, rdev); in fix_read_error()
2828 mdname(mddev), s, in fix_read_error()
2835 mdname(mddev), in fix_read_error()
2838 rdev_dec_pending(rdev, mddev); in fix_read_error()
2862 mdname(mddev), s, in fix_read_error()
2868 mdname(mddev), in fix_read_error()
2873 mdname(mddev), s, in fix_read_error()
2881 rdev_dec_pending(rdev, mddev); in fix_read_error()
2894 struct mddev *mddev = r10_bio->mddev; in narrow_write_error() local
2895 struct r10conf *conf = mddev->private; in narrow_write_error()
2931 &mddev->bio_set); in narrow_write_error()
2952 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) in handle_read_error() argument
2956 struct r10conf *conf = mddev->private; in handle_read_error()
2971 if (mddev->ro) in handle_read_error()
2975 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2978 md_error(mddev, rdev); in handle_read_error()
2980 rdev_dec_pending(rdev, mddev); in handle_read_error()
2983 raid10_read_request(mddev, r10_bio->master_bio, r10_bio); in handle_read_error()
3015 md_error(conf->mddev, rdev); in handle_write_completed()
3032 md_error(conf->mddev, rdev); in handle_write_completed()
3047 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
3051 md_error(conf->mddev, rdev); in handle_write_completed()
3055 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
3064 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
3077 md_wakeup_thread(conf->mddev->thread); in handle_write_completed()
3089 struct mddev *mddev = thread->mddev; in raid10d() local
3092 struct r10conf *conf = mddev->private; in raid10d()
3096 md_check_recovery(mddev); in raid10d()
3099 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in raid10d()
3102 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in raid10d()
3113 if (mddev->degraded) in raid10d()
3138 mddev = r10_bio->mddev; in raid10d()
3139 conf = mddev->private; in raid10d()
3144 reshape_request_write(mddev, r10_bio); in raid10d()
3146 sync_request_write(mddev, r10_bio); in raid10d()
3148 recovery_request_write(mddev, r10_bio); in raid10d()
3150 handle_read_error(mddev, r10_bio); in raid10d()
3155 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) in raid10d()
3156 md_check_recovery(mddev); in raid10d()
3187 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in raid10_alloc_init_r10buf()
3188 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in raid10_alloc_init_r10buf()
3234 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors; in raid10_set_cluster_sync_high()
3277 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, in raid10_sync_request() argument
3280 struct r10conf *conf = mddev->private; in raid10_sync_request()
3300 if (mddev->bitmap == NULL && in raid10_sync_request()
3301 mddev->recovery_cp == MaxSector && in raid10_sync_request()
3302 mddev->reshape_position == MaxSector && in raid10_sync_request()
3303 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in raid10_sync_request()
3304 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in raid10_sync_request()
3305 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in raid10_sync_request()
3308 return mddev->dev_sectors - sector_nr; in raid10_sync_request()
3312 max_sector = mddev->dev_sectors; in raid10_sync_request()
3313 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in raid10_sync_request()
3314 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid10_sync_request()
3315 max_sector = mddev->resync_max_sectors; in raid10_sync_request()
3329 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in raid10_sync_request()
3335 if (mddev->curr_resync < max_sector) { /* aborted */ in raid10_sync_request()
3336 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) in raid10_sync_request()
3337 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in raid10_sync_request()
3341 raid10_find_virt(conf, mddev->curr_resync, i); in raid10_sync_request()
3342 md_bitmap_end_sync(mddev->bitmap, sect, in raid10_sync_request()
3347 if ((!mddev->bitmap || conf->fullsync) in raid10_sync_request()
3349 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid10_sync_request()
3364 md_bitmap_close_sync(mddev->bitmap); in raid10_sync_request()
3370 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid10_sync_request()
3371 return reshape_request(mddev, sector_nr, skipped); in raid10_sync_request()
3381 if (max_sector > mddev->resync_max) in raid10_sync_request()
3382 max_sector = mddev->resync_max; /* Don't do IO beyond here */ in raid10_sync_request()
3414 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid10_sync_request()
3451 if (sect >= mddev->resync_max_sectors) { in raid10_sync_request()
3464 must_sync = md_bitmap_start_sync(mddev->bitmap, sect, in raid10_sync_request()
3491 r10_bio->mddev = mddev; in raid10_sync_request()
3510 must_sync = md_bitmap_start_sync(mddev->bitmap, sect, in raid10_sync_request()
3626 &mddev->recovery)) in raid10_sync_request()
3628 mdname(mddev)); in raid10_sync_request()
3630 = mddev->recovery_disabled; in raid10_sync_request()
3636 rdev_dec_pending(mrdev, mddev); in raid10_sync_request()
3638 rdev_dec_pending(mreplace, mddev); in raid10_sync_request()
3641 rdev_dec_pending(mrdev, mddev); in raid10_sync_request()
3643 rdev_dec_pending(mreplace, mddev); in raid10_sync_request()
3682 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, in raid10_sync_request()
3683 mddev_is_clustered(mddev) && in raid10_sync_request()
3686 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, in raid10_sync_request()
3687 &sync_blocks, mddev->degraded) && in raid10_sync_request()
3689 &mddev->recovery)) { in raid10_sync_request()
3699 r10_bio->mddev = mddev; in raid10_sync_request()
3781 mddev); in raid10_sync_request()
3786 mddev); in raid10_sync_request()
3818 if (mddev_is_clustered(mddev) && in raid10_sync_request()
3819 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid10_sync_request()
3822 conf->cluster_sync_low = mddev->curr_resync_completed; in raid10_sync_request()
3825 md_cluster_ops->resync_info_update(mddev, in raid10_sync_request()
3829 } else if (mddev_is_clustered(mddev)) { in raid10_sync_request()
3849 mddev->curr_resync_completed, i); in raid10_sync_request()
3858 md_cluster_ops->resync_info_update(mddev, in raid10_sync_request()
3883 md_done_sync(mddev, sectors_skipped, 1); in raid10_sync_request()
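
Of the raid10_sync_request() hits, 3300-3315 and 3381-3382 decide how far a pass may run: dev_sectors bounds a recovery, resync_max_sectors (which can be larger while a reshape grows the array) bounds a sync or reshape, and the user-visible resync_max throttle caps both. Condensed straight from the hits:

	sector_t max_sector = mddev->dev_sectors;		/* hit 3312 */
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sector = mddev->resync_max_sectors;		/* hits 3313-3315 */
	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max;	/* hits 3381-3382: don't do IO beyond here */
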
3901 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid10_size() argument
3904 struct r10conf *conf = mddev->private; in raid10_size()
3951 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) in setup_geo() argument
3957 layout = mddev->layout; in setup_geo()
3958 chunk = mddev->chunk_sectors; in setup_geo()
3959 disks = mddev->raid_disks - mddev->delta_disks; in setup_geo()
3962 layout = mddev->new_layout; in setup_geo()
3963 chunk = mddev->new_chunk_sectors; in setup_geo()
3964 disks = mddev->raid_disks; in setup_geo()
3969 layout = mddev->new_layout; in setup_geo()
3970 chunk = mddev->new_chunk_sectors; in setup_geo()
3971 disks = mddev->raid_disks + mddev->delta_disks; in setup_geo()
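
Hits 3951-3971 are setup_geo()'s three-way switch: one geometry calculator serves the pre-reshape view (old layout/chunk, raid_disks - delta_disks), the view at reshape start (new layout/chunk at the unchanged disk count), and the target view (new layout/chunk, raid_disks + delta_disks). A skeleton; which geo_type label maps to which triple is inferred, only the three triples themselves are confirmed by the hits:

	switch (new) {
	case geo_old:		/* geometry before the reshape began */
		layout = mddev->layout;
		chunk  = mddev->chunk_sectors;
		disks  = mddev->raid_disks - mddev->delta_disks;
		break;
	case geo_start:		/* reshape starting: disk count not yet bumped */
		layout = mddev->new_layout;
		chunk  = mddev->new_chunk_sectors;
		disks  = mddev->raid_disks;
		break;
	case geo_new:		/* target geometry */
		layout = mddev->new_layout;
		chunk  = mddev->new_chunk_sectors;
		disks  = mddev->raid_disks + mddev->delta_disks;
		break;
	}
	/* ... near/far/offset copy counts are then decoded out of 'layout' ... */
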
4007 static struct r10conf *setup_conf(struct mddev *mddev) in setup_conf() argument
4014 copies = setup_geo(&geo, mddev, geo_new); in setup_conf()
4018 mdname(mddev), PAGE_SIZE); in setup_conf()
4022 if (copies < 2 || copies > mddev->raid_disks) { in setup_conf()
4024 mdname(mddev), mddev->new_layout); in setup_conf()
4034 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks), in setup_conf()
4055 calc_sectors(conf, mddev->dev_sectors); in setup_conf()
4056 if (mddev->reshape_position == MaxSector) { in setup_conf()
4060 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { in setup_conf()
4064 conf->reshape_progress = mddev->reshape_position; in setup_conf()
4081 conf->thread = md_register_thread(raid10d, mddev, "raid10"); in setup_conf()
4085 conf->mddev = mddev; in setup_conf()
4105 blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) * in raid10_set_io_opt()
4109 static int raid10_run(struct mddev *mddev) in raid10_run() argument
4119 if (mddev_init_writes_pending(mddev) < 0) in raid10_run()
4122 if (mddev->private == NULL) { in raid10_run()
4123 conf = setup_conf(mddev); in raid10_run()
4126 mddev->private = conf; in raid10_run()
4128 conf = mddev->private; in raid10_run()
4132 if (mddev_is_clustered(conf->mddev)) { in raid10_run()
4135 fc = (mddev->layout >> 8) & 255; in raid10_run()
4136 fo = mddev->layout & (1<<16); in raid10_run()
4144 mddev->thread = conf->thread; in raid10_run()
4147 if (mddev->queue) { in raid10_run()
4148 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); in raid10_run()
4149 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); in raid10_run()
4153 rdev_for_each(rdev, mddev) { in raid10_run()
4174 if (!mddev->reshape_backwards) in raid10_run()
4181 if (mddev->gendisk) in raid10_run()
4182 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_run()
4192 mdname(mddev)); in raid10_run()
4206 mddev->degraded = 0; in raid10_run()
4224 mddev->degraded++; in raid10_run()
4236 disk->recovery_disabled = mddev->recovery_disabled - 1; in raid10_run()
4239 if (mddev->recovery_cp != MaxSector) in raid10_run()
4241 mdname(mddev)); in raid10_run()
4243 mdname(mddev), conf->geo.raid_disks - mddev->degraded, in raid10_run()
4248 mddev->dev_sectors = conf->dev_sectors; in raid10_run()
4249 size = raid10_size(mddev, 0, 0); in raid10_run()
4250 md_set_array_sectors(mddev, size); in raid10_run()
4251 mddev->resync_max_sectors = size; in raid10_run()
4252 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); in raid10_run()
4254 if (md_integrity_register(mddev)) in raid10_run()
4272 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid10_run()
4273 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid10_run()
4274 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid10_run()
4275 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid10_run()
4276 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid10_run()
4278 if (!mddev->sync_thread) in raid10_run()
4285 md_unregister_thread(&mddev->thread); in raid10_run()
4290 mddev->private = NULL; in raid10_run()
4295 static void raid10_free(struct mddev *mddev, void *priv) in raid10_free() argument
4308 static void raid10_quiesce(struct mddev *mddev, int quiesce) in raid10_quiesce() argument
4310 struct r10conf *conf = mddev->private; in raid10_quiesce()
4318 static int raid10_resize(struct mddev *mddev, sector_t sectors) in raid10_resize() argument
4332 struct r10conf *conf = mddev->private; in raid10_resize()
4335 if (mddev->reshape_position != MaxSector) in raid10_resize()
4341 oldsize = raid10_size(mddev, 0, 0); in raid10_resize()
4342 size = raid10_size(mddev, sectors, 0); in raid10_resize()
4343 if (mddev->external_size && in raid10_resize()
4344 mddev->array_sectors > size) in raid10_resize()
4346 if (mddev->bitmap) { in raid10_resize()
4347 int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0); in raid10_resize()
4351 md_set_array_sectors(mddev, size); in raid10_resize()
4352 if (sectors > mddev->dev_sectors && in raid10_resize()
4353 mddev->recovery_cp > oldsize) { in raid10_resize()
4354 mddev->recovery_cp = oldsize; in raid10_resize()
4355 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid10_resize()
4358 mddev->dev_sectors = conf->dev_sectors; in raid10_resize()
4359 mddev->resync_max_sectors = size; in raid10_resize()
4363 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) in raid10_takeover_raid0() argument
4368 if (mddev->degraded > 0) { in raid10_takeover_raid0()
4370 mdname(mddev)); in raid10_takeover_raid0()
4376 mddev->new_level = 10; in raid10_takeover_raid0()
4378 mddev->new_layout = (1<<8) + 2; in raid10_takeover_raid0()
4379 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid10_takeover_raid0()
4380 mddev->delta_disks = mddev->raid_disks; in raid10_takeover_raid0()
4381 mddev->raid_disks *= 2; in raid10_takeover_raid0()
4383 mddev->recovery_cp = MaxSector; in raid10_takeover_raid0()
4384 mddev->dev_sectors = size; in raid10_takeover_raid0()
4386 conf = setup_conf(mddev); in raid10_takeover_raid0()
4388 rdev_for_each(rdev, mddev) in raid10_takeover_raid0()
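
Hits 4363-4388 implement the raid0 takeover: each raid0 member becomes half of a near-2 mirror pair, so the disk count doubles while the chunk size carries over. The layout word (1<<8) + 2 encodes far_copies = 1 in the second byte and near_copies = 2 in the low byte, matching the decode at hits 4135-4136. Condensed from the hits:

	mddev->new_level = 10;			/* hit 4376 */
	mddev->new_layout = (1 << 8) + 2;	/* fc=1, nc=2: plain near-2 raid10 */
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = mddev->raid_disks;	/* one new mirror per member */
	mddev->raid_disks *= 2;
	mddev->recovery_cp = MaxSector;		/* clean; the empty halves rebuild
						 * as recovering spares instead */
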
4399 static void *raid10_takeover(struct mddev *mddev) in raid10_takeover() argument
4406 if (mddev->level == 0) { in raid10_takeover()
4408 raid0_conf = mddev->private; in raid10_takeover()
4411 mdname(mddev)); in raid10_takeover()
4414 return raid10_takeover_raid0(mddev, in raid10_takeover()
4421 static int raid10_check_reshape(struct mddev *mddev) in raid10_check_reshape() argument
4437 struct r10conf *conf = mddev->private; in raid10_check_reshape()
4443 if (setup_geo(&geo, mddev, geo_start) != conf->copies) in raid10_check_reshape()
4450 if (mddev->array_sectors & geo.chunk_mask) in raid10_check_reshape()
4459 if (mddev->delta_disks > 0) { in raid10_check_reshape()
4462 kcalloc(mddev->raid_disks + mddev->delta_disks, in raid10_check_reshape()
4528 static int raid10_start_reshape(struct mddev *mddev) in raid10_start_reshape() argument
4544 struct r10conf *conf = mddev->private; in raid10_start_reshape()
4549 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in raid10_start_reshape()
4552 if (setup_geo(&new, mddev, geo_start) != conf->copies) in raid10_start_reshape()
4560 rdev_for_each(rdev, mddev) { in raid10_start_reshape()
4567 if (!mddev->reshape_backwards) in raid10_start_reshape()
4580 if (spares < mddev->delta_disks) in raid10_start_reshape()
4594 setup_geo(&conf->geo, mddev, geo_start); in raid10_start_reshape()
4596 if (mddev->reshape_backwards) { in raid10_start_reshape()
4597 sector_t size = raid10_size(mddev, 0, 0); in raid10_start_reshape()
4598 if (size < mddev->array_sectors) { in raid10_start_reshape()
4601 mdname(mddev)); in raid10_start_reshape()
4604 mddev->resync_max_sectors = size; in raid10_start_reshape()
4611 if (mddev->delta_disks && mddev->bitmap) { in raid10_start_reshape()
4615 oldsize = raid10_size(mddev, 0, 0); in raid10_start_reshape()
4616 newsize = raid10_size(mddev, 0, conf->geo.raid_disks); in raid10_start_reshape()
4618 if (!mddev_is_clustered(mddev)) { in raid10_start_reshape()
4619 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); in raid10_start_reshape()
4626 rdev_for_each(rdev, mddev) { in raid10_start_reshape()
4641 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); in raid10_start_reshape()
4645 ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize); in raid10_start_reshape()
4647 md_bitmap_resize(mddev->bitmap, oldsize, 0, 0); in raid10_start_reshape()
4652 if (mddev->delta_disks > 0) { in raid10_start_reshape()
4653 rdev_for_each(rdev, mddev) in raid10_start_reshape()
4656 if (raid10_add_disk(mddev, rdev) == 0) { in raid10_start_reshape()
4664 sysfs_link_rdev(mddev, rdev); in raid10_start_reshape()
4677 mddev->degraded = calc_degraded(conf); in raid10_start_reshape()
4679 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4680 mddev->reshape_position = conf->reshape_progress; in raid10_start_reshape()
4681 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid10_start_reshape()
4683 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid10_start_reshape()
4684 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid10_start_reshape()
4685 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in raid10_start_reshape()
4686 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid10_start_reshape()
4687 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid10_start_reshape()
4689 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid10_start_reshape()
4691 if (!mddev->sync_thread) { in raid10_start_reshape()
4696 md_wakeup_thread(mddev->sync_thread); in raid10_start_reshape()
4701 mddev->recovery = 0; in raid10_start_reshape()
4704 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4705 rdev_for_each(rdev, mddev) in raid10_start_reshape()
4710 mddev->reshape_position = MaxSector; in raid10_start_reshape()
4746 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, in reshape_request() argument
4786 struct r10conf *conf = mddev->private; in reshape_request()
4801 if (mddev->reshape_backwards && in reshape_request()
4802 conf->reshape_progress < raid10_size(mddev, 0, 0)) { in reshape_request()
4803 sector_nr = (raid10_size(mddev, 0, 0) in reshape_request()
4805 } else if (!mddev->reshape_backwards && in reshape_request()
4809 mddev->curr_resync_completed = sector_nr; in reshape_request()
4810 sysfs_notify_dirent_safe(mddev->sysfs_completed); in reshape_request()
4820 if (mddev->reshape_backwards) { in reshape_request()
4870 mddev->reshape_position = conf->reshape_progress; in reshape_request()
4871 if (mddev->reshape_backwards) in reshape_request()
4872 mddev->curr_resync_completed = raid10_size(mddev, 0, 0) in reshape_request()
4875 mddev->curr_resync_completed = conf->reshape_progress; in reshape_request()
4877 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in reshape_request()
4878 md_wakeup_thread(mddev->thread); in reshape_request()
4879 wait_event(mddev->sb_wait, mddev->sb_flags == 0 || in reshape_request()
4880 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
4881 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in reshape_request()
4885 conf->reshape_safe = mddev->reshape_position; in reshape_request()
4896 r10_bio->mddev = mddev; in reshape_request()
4909 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in reshape_request()
4914 GFP_KERNEL, &mddev->bio_set); in reshape_request()
4926 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) { in reshape_request()
4944 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low, in reshape_request()
5014 if (mddev->reshape_backwards) in reshape_request()
5023 static int handle_reshape_read_error(struct mddev *mddev,
5025 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) in reshape_request_write() argument
5032 struct r10conf *conf = mddev->private; in reshape_request_write()
5036 if (handle_reshape_read_error(mddev, r10_bio) < 0) { in reshape_request_write()
5038 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
5074 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) in end_reshape()
5079 md_finish_reshape(conf->mddev); in end_reshape()
5085 if (conf->mddev->queue) in end_reshape()
5090 static void raid10_update_reshape_pos(struct mddev *mddev) in raid10_update_reshape_pos() argument
5092 struct r10conf *conf = mddev->private; in raid10_update_reshape_pos()
5095 md_cluster_ops->resync_info_get(mddev, &lo, &hi); in raid10_update_reshape_pos()
5096 if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo)) in raid10_update_reshape_pos()
5097 || mddev->reshape_position == MaxSector) in raid10_update_reshape_pos()
5098 conf->reshape_progress = mddev->reshape_position; in raid10_update_reshape_pos()
5103 static int handle_reshape_read_error(struct mddev *mddev, in handle_reshape_read_error() argument
5108 struct r10conf *conf = mddev->private; in handle_reshape_read_error()
5116 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in handle_reshape_read_error()
5152 rdev_dec_pending(rdev, mddev); in handle_reshape_read_error()
5167 &mddev->recovery); in handle_reshape_read_error()
5181 struct mddev *mddev = r10_bio->mddev; in end_reshape_write() local
5182 struct r10conf *conf = mddev->private; in end_reshape_write()
5198 md_error(mddev, rdev); in end_reshape_write()
5201 rdev_dec_pending(rdev, mddev); in end_reshape_write()
5209 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()
5214 static void raid10_finish_reshape(struct mddev *mddev) in raid10_finish_reshape() argument
5216 struct r10conf *conf = mddev->private; in raid10_finish_reshape()
5218 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in raid10_finish_reshape()
5221 if (mddev->delta_disks > 0) { in raid10_finish_reshape()
5222 if (mddev->recovery_cp > mddev->resync_max_sectors) { in raid10_finish_reshape()
5223 mddev->recovery_cp = mddev->resync_max_sectors; in raid10_finish_reshape()
5224 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid10_finish_reshape()
5226 mddev->resync_max_sectors = mddev->array_sectors; in raid10_finish_reshape()
5231 d < conf->geo.raid_disks - mddev->delta_disks; in raid10_finish_reshape()
5242 mddev->layout = mddev->new_layout; in raid10_finish_reshape()
5243 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()
5244 mddev->reshape_position = MaxSector; in raid10_finish_reshape()
5245 mddev->delta_disks = 0; in raid10_finish_reshape()
5246 mddev->reshape_backwards = 0; in raid10_finish_reshape()
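
Hits 5214-5246 close the reshape: unless it was interrupted, the staged geometry becomes the live one. Reassembled from the hits (the shrink branch that clears the removed disks, lines 5231 onward, is elided):

	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
		return;				/* hit 5218: keep the old geometry */

	if (mddev->delta_disks > 0) {		/* array grew: recover the tail */
		if (mddev->recovery_cp > mddev->resync_max_sectors) {
			mddev->recovery_cp = mddev->resync_max_sectors;
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		}
		mddev->resync_max_sectors = mddev->array_sectors;
	}
	mddev->layout = mddev->new_layout;	/* staged -> live, hits 5242-5246 */
	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
	mddev->reshape_position = MaxSector;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;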