Lines matching refs:rs — cross-reference of the struct raid_set pointer `rs` in drivers/md/dm-raid.c

260 static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)  in rs_config_backup()  argument
262 struct mddev *mddev = &rs->md; in rs_config_backup()
269 static void rs_config_restore(struct raid_set *rs, struct rs_layout *l) in rs_config_restore() argument
271 struct mddev *mddev = &rs->md; in rs_config_restore()
400 static bool rs_is_raid0(struct raid_set *rs) in rs_is_raid0() argument
402 return !rs->md.level; in rs_is_raid0()
406 static bool rs_is_raid1(struct raid_set *rs) in rs_is_raid1() argument
408 return rs->md.level == 1; in rs_is_raid1()
412 static bool rs_is_raid10(struct raid_set *rs) in rs_is_raid10() argument
414 return rs->md.level == 10; in rs_is_raid10()
418 static bool rs_is_raid6(struct raid_set *rs) in rs_is_raid6() argument
420 return rs->md.level == 6; in rs_is_raid6()
424 static bool rs_is_raid456(struct raid_set *rs) in rs_is_raid456() argument
426 return __within_range(rs->md.level, 4, 6); in rs_is_raid456()
431 static bool rs_is_reshapable(struct raid_set *rs) in rs_is_reshapable() argument
433 return rs_is_raid456(rs) || in rs_is_reshapable()
434 (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout)); in rs_is_reshapable()
438 static bool rs_is_recovering(struct raid_set *rs) in rs_is_recovering() argument
440 return rs->md.recovery_cp < rs->md.dev_sectors; in rs_is_recovering()
444 static bool rs_is_reshaping(struct raid_set *rs) in rs_is_reshaping() argument
446 return rs->md.reshape_position != MaxSector; in rs_is_reshaping()
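
The predicates above are thin wrappers over the md fields of the raid set: raid0 is md level 0 (hence the `!rs->md.level` test), recovery is in progress while the recovery checkpoint has not reached the per-device sector count, and an active reshape is signalled by reshape_position holding anything other than the MaxSector sentinel. A minimal user-space model of that logic, assuming simplified struct fields:

    /* Simplified model of the rs_is_*() predicates; the real
     * struct raid_set embeds a full struct mddev. */
    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_SECTOR UINT64_MAX       /* stand-in for the kernel's MaxSector */

    struct model_rs {
        int level;                      /* md level: 0, 1, 4, 5, 6, 10 */
        uint64_t recovery_cp;           /* recovery checkpoint, sectors */
        uint64_t dev_sectors;           /* usable sectors per device */
        uint64_t reshape_position;      /* MAX_SECTOR when idle */
    };

    static bool rs_is_raid456(const struct model_rs *rs)
    {
        return rs->level >= 4 && rs->level <= 6;    /* __within_range(level, 4, 6) */
    }

    static bool rs_is_recovering(const struct model_rs *rs)
    {
        return rs->recovery_cp < rs->dev_sectors;
    }

    static bool rs_is_reshaping(const struct model_rs *rs)
    {
        return rs->reshape_position != MAX_SECTOR;
    }
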
491 static unsigned long __valid_flags(struct raid_set *rs) in __valid_flags() argument
493 if (rt_is_raid0(rs->raid_type)) in __valid_flags()
495 else if (rt_is_raid1(rs->raid_type)) in __valid_flags()
497 else if (rt_is_raid10(rs->raid_type)) in __valid_flags()
499 else if (rt_is_raid45(rs->raid_type)) in __valid_flags()
501 else if (rt_is_raid6(rs->raid_type)) in __valid_flags()
512 static int rs_check_for_valid_flags(struct raid_set *rs) in rs_check_for_valid_flags() argument
514 if (rs->ctr_flags & ~__valid_flags(rs)) { in rs_check_for_valid_flags()
515 rs->ti->error = "Invalid flags combination"; in rs_check_for_valid_flags()
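
__valid_flags() returns the mask of constructor flags legal for the set's raid type, and rs_check_for_valid_flags() rejects the table if any bit outside that mask is set. A hedged sketch of the pattern; the flag names and per-level masks here are illustrative, not the kernel's actual CTR_FLAG_* values:

    #include <stdio.h>

    #define F_SYNC    (1UL << 0)
    #define F_NOSYNC  (1UL << 1)
    #define F_REBUILD (1UL << 2)

    static unsigned long valid_flags_for_level(int level)
    {
        switch (level) {
        case 0:  return F_SYNC;                        /* raid0: hardly anything */
        case 1:  return F_SYNC | F_NOSYNC | F_REBUILD;
        default: return F_SYNC | F_NOSYNC;
        }
    }

    static int check_flags(unsigned long ctr_flags, int level)
    {
        if (ctr_flags & ~valid_flags_for_level(level)) {
            fprintf(stderr, "Invalid flags combination\n");
            return -22;                                /* -EINVAL */
        }
        return 0;
    }
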
599 static int raid10_format_to_md_layout(struct raid_set *rs, in raid10_format_to_md_layout() argument
620 if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) in raid10_format_to_md_layout()
625 if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) in raid10_format_to_md_layout()
683 static void rs_set_rdev_sectors(struct raid_set *rs) in rs_set_rdev_sectors() argument
685 struct mddev *mddev = &rs->md; in rs_set_rdev_sectors()
700 static void rs_set_capacity(struct raid_set *rs) in rs_set_capacity() argument
702 struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table)); in rs_set_capacity()
704 set_capacity_and_notify(gendisk, rs->md.array_sectors); in rs_set_capacity()
711 static void rs_set_cur(struct raid_set *rs) in rs_set_cur() argument
713 struct mddev *mddev = &rs->md; in rs_set_cur()
724 static void rs_set_new(struct raid_set *rs) in rs_set_new() argument
726 struct mddev *mddev = &rs->md; in rs_set_new()
731 mddev->raid_disks = rs->raid_disks; in rs_set_new()
739 struct raid_set *rs; in raid_set_alloc() local
746 rs = kzalloc(struct_size(rs, dev, raid_devs), GFP_KERNEL); in raid_set_alloc()
747 if (!rs) { in raid_set_alloc()
752 mddev_init(&rs->md); in raid_set_alloc()
754 rs->raid_disks = raid_devs; in raid_set_alloc()
755 rs->delta_disks = 0; in raid_set_alloc()
757 rs->ti = ti; in raid_set_alloc()
758 rs->raid_type = raid_type; in raid_set_alloc()
759 rs->stripe_cache_entries = 256; in raid_set_alloc()
760 rs->md.raid_disks = raid_devs; in raid_set_alloc()
761 rs->md.level = raid_type->level; in raid_set_alloc()
762 rs->md.new_level = rs->md.level; in raid_set_alloc()
763 rs->md.layout = raid_type->algorithm; in raid_set_alloc()
764 rs->md.new_layout = rs->md.layout; in raid_set_alloc()
765 rs->md.delta_disks = 0; in raid_set_alloc()
766 rs->md.recovery_cp = MaxSector; in raid_set_alloc()
769 md_rdev_init(&rs->dev[i].rdev); in raid_set_alloc()
780 return rs; in raid_set_alloc()
784 static void raid_set_free(struct raid_set *rs) in raid_set_free() argument
788 if (rs->journal_dev.dev) { in raid_set_free()
789 md_rdev_clear(&rs->journal_dev.rdev); in raid_set_free()
790 dm_put_device(rs->ti, rs->journal_dev.dev); in raid_set_free()
793 for (i = 0; i < rs->raid_disks; i++) { in raid_set_free()
794 if (rs->dev[i].meta_dev) in raid_set_free()
795 dm_put_device(rs->ti, rs->dev[i].meta_dev); in raid_set_free()
796 md_rdev_clear(&rs->dev[i].rdev); in raid_set_free()
797 if (rs->dev[i].data_dev) in raid_set_free()
798 dm_put_device(rs->ti, rs->dev[i].data_dev); in raid_set_free()
801 kfree(rs); in raid_set_free()
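
raid_set_alloc() sizes the allocation with struct_size(), the kernel helper that computes sizeof(*rs) plus raid_devs elements of the trailing flexible array with overflow checking; raid_set_free() then releases the journal and per-disk devices before freeing the set. The allocation pattern, modelled in user space:

    #include <stdlib.h>

    struct model_dev { void *meta_dev, *data_dev; };

    struct model_raid_set {
        int raid_disks;
        struct model_dev dev[];     /* flexible array member, one per leg */
    };

    static struct model_raid_set *model_alloc(int raid_devs)
    {
        /* kzalloc(struct_size(rs, dev, raid_devs), GFP_KERNEL) equivalent */
        struct model_raid_set *rs =
            calloc(1, sizeof(*rs) + (size_t)raid_devs * sizeof(rs->dev[0]));
        if (rs)
            rs->raid_disks = raid_devs;
        return rs;
    }
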
820 static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as) in parse_dev_params() argument
833 for (i = 0; i < rs->raid_disks; i++) { in parse_dev_params()
834 rs->dev[i].rdev.raid_disk = i; in parse_dev_params()
836 rs->dev[i].meta_dev = NULL; in parse_dev_params()
837 rs->dev[i].data_dev = NULL; in parse_dev_params()
843 rs->dev[i].rdev.data_offset = 0; in parse_dev_params()
844 rs->dev[i].rdev.new_data_offset = 0; in parse_dev_params()
845 rs->dev[i].rdev.mddev = &rs->md; in parse_dev_params()
852 r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), in parse_dev_params()
853 &rs->dev[i].meta_dev); in parse_dev_params()
855 rs->ti->error = "RAID metadata device lookup failure"; in parse_dev_params()
859 rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL); in parse_dev_params()
860 if (!rs->dev[i].rdev.sb_page) { in parse_dev_params()
861 rs->ti->error = "Failed to allocate superblock page"; in parse_dev_params()
871 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) && in parse_dev_params()
872 (!rs->dev[i].rdev.recovery_offset)) { in parse_dev_params()
873 rs->ti->error = "Drive designated for rebuild not specified"; in parse_dev_params()
877 if (rs->dev[i].meta_dev) { in parse_dev_params()
878 rs->ti->error = "No data device supplied with metadata device"; in parse_dev_params()
885 r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), in parse_dev_params()
886 &rs->dev[i].data_dev); in parse_dev_params()
888 rs->ti->error = "RAID device lookup failure"; in parse_dev_params()
892 if (rs->dev[i].meta_dev) { in parse_dev_params()
894 rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev; in parse_dev_params()
896 rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev; in parse_dev_params()
897 list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks); in parse_dev_params()
898 if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) in parse_dev_params()
902 if (rs->journal_dev.dev) in parse_dev_params()
903 list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks); in parse_dev_params()
906 rs->md.external = 0; in parse_dev_params()
907 rs->md.persistent = 1; in parse_dev_params()
908 rs->md.major_version = 2; in parse_dev_params()
909 } else if (rebuild && !rs->md.recovery_cp) { in parse_dev_params()
921 rs->ti->error = "Unable to rebuild drive while array is not in-sync"; in parse_dev_params()
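
parse_dev_params() consumes one <metadata_dev> <data_dev> pair per raid device, with "-" standing for an absent metadata device; a metadata device without a data device is an error, as the messages above show. An illustrative table line in the documented dm-raid format (device numbers hypothetical):

    # raid4 set, 5 legs, chunk size 2048 sectors, no metadata devices,
    # so every pair is "- <data_dev>":
    0 1960893648 raid \
            raid4 1 2048 \
            5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
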
938 static int validate_region_size(struct raid_set *rs, unsigned long region_size) in validate_region_size() argument
940 unsigned long min_region_size = rs->ti->len / (1 << 21); in validate_region_size()
942 if (rs_is_raid0(rs)) in validate_region_size()
962 if (region_size > rs->ti->len) { in validate_region_size()
963 rs->ti->error = "Supplied region size is too large"; in validate_region_size()
970 rs->ti->error = "Supplied region size is too small"; in validate_region_size()
975 rs->ti->error = "Region size is not a power of 2"; in validate_region_size()
979 if (region_size < rs->md.chunk_sectors) { in validate_region_size()
980 rs->ti->error = "Region size is smaller than the chunk size"; in validate_region_size()
988 rs->md.bitmap_info.chunksize = to_bytes(region_size); in validate_region_size()
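
The region size feeds the write-intent bitmap: it must fit the target, be a power of two, be at least as large as the chunk size, and be large enough that the bitmap stays below roughly 2^21 regions (the `ti->len / (1 << 21)` floor above); the accepted value is then converted to bytes for bitmap_info.chunksize. A compact model of those checks:

    #include <stdint.h>
    #include <stdio.h>

    static int model_validate_region_size(uint64_t region, uint64_t ti_len,
                                          uint64_t chunk_sectors)
    {
        uint64_t min_region = ti_len >> 21;   /* caps the bitmap at ~2M regions */

        if (region > ti_len) {
            fprintf(stderr, "Supplied region size is too large\n");
            return -1;
        }
        if (region < min_region) {
            fprintf(stderr, "Supplied region size is too small\n");
            return -1;
        }
        if (region & (region - 1)) {
            fprintf(stderr, "Region size is not a power of 2\n");
            return -1;
        }
        if (region < chunk_sectors) {
            fprintf(stderr, "Region size is smaller than the chunk size\n");
            return -1;
        }
        return 0;                             /* chunksize = region << 9 bytes */
    }
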
1002 static int validate_raid_redundancy(struct raid_set *rs) in validate_raid_redundancy() argument
1008 for (i = 0; i < rs->raid_disks; i++) in validate_raid_redundancy()
1009 if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) && in validate_raid_redundancy()
1010 ((!test_bit(In_sync, &rs->dev[i].rdev.flags) || in validate_raid_redundancy()
1011 !rs->dev[i].rdev.sb_page))) in validate_raid_redundancy()
1014 switch (rs->md.level) { in validate_raid_redundancy()
1018 if (rebuild_cnt >= rs->md.raid_disks) in validate_raid_redundancy()
1024 if (rebuild_cnt > rs->raid_type->parity_devs) in validate_raid_redundancy()
1028 copies = raid10_md_layout_to_copies(rs->md.new_layout); in validate_raid_redundancy()
1051 raid_disks = min(rs->raid_disks, rs->md.raid_disks); in validate_raid_redundancy()
1052 if (__is_raid10_near(rs->md.new_layout)) { in validate_raid_redundancy()
1056 if ((!rs->dev[i].rdev.sb_page || in validate_raid_redundancy()
1057 !test_bit(In_sync, &rs->dev[i].rdev.flags)) && in validate_raid_redundancy()
1082 if ((!rs->dev[i].rdev.sb_page || in validate_raid_redundancy()
1083 !test_bit(In_sync, &rs->dev[i].rdev.flags)) && in validate_raid_redundancy()
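
For raid10 "near" layouts the redundancy walk groups consecutive devices into mirror sets of `copies` members and fails if any set loses all of them. A simplified model, assuming raid_disks is a multiple of copies and ignoring the FirstUse/rebuild bookkeeping the kernel loop also tracks:

    #include <stdbool.h>

    static bool near_sets_redundant(const bool *healthy, int raid_disks, int copies)
    {
        for (int i = 0; i < raid_disks; i += copies) {
            int alive = 0;

            for (int j = 0; j < copies; j++)
                alive += healthy[i + j];
            if (!alive)
                return false;   /* an entire mirror set is gone */
        }
        return true;
    }
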
1124 static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as, in parse_raid_params() argument
1134 struct raid_type *rt = rs->raid_type; in parse_raid_params()
1140 rs->ti->error = "Bad numerical argument given for chunk_size"; in parse_raid_params()
1153 rs->ti->error = "Chunk size must be a power of 2"; in parse_raid_params()
1156 rs->ti->error = "Chunk size value is too small"; in parse_raid_params()
1160 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; in parse_raid_params()
1179 for (i = 0; i < rs->raid_disks; i++) { in parse_raid_params()
1180 set_bit(In_sync, &rs->dev[i].rdev.flags); in parse_raid_params()
1181 rs->dev[i].rdev.recovery_offset = MaxSector; in parse_raid_params()
1190 rs->ti->error = "Not enough raid parameters given"; in parse_raid_params()
1195 if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) { in parse_raid_params()
1196 rs->ti->error = "Only one 'nosync' argument allowed"; in parse_raid_params()
1202 if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) { in parse_raid_params()
1203 rs->ti->error = "Only one 'sync' argument allowed"; in parse_raid_params()
1209 if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) { in parse_raid_params()
1210 rs->ti->error = "Only one 'raid10_use_new_sets' argument allowed"; in parse_raid_params()
1219 rs->ti->error = "Wrong number of raid parameters given"; in parse_raid_params()
1228 if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) { in parse_raid_params()
1229 rs->ti->error = "Only one 'raid10_format' argument pair allowed"; in parse_raid_params()
1233 rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; in parse_raid_params()
1238 rs->ti->error = "Invalid 'raid10_format' value given"; in parse_raid_params()
1249 if (test_and_set_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) { in parse_raid_params()
1250 rs->ti->error = "Only one raid4/5/6 set journaling device allowed"; in parse_raid_params()
1254 rs->ti->error = "'journal_dev' is an invalid parameter for this RAID type"; in parse_raid_params()
1257 r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table), in parse_raid_params()
1258 &rs->journal_dev.dev); in parse_raid_params()
1260 rs->ti->error = "raid4/5/6 journal device lookup failure"; in parse_raid_params()
1263 jdev = &rs->journal_dev.rdev; in parse_raid_params()
1265 jdev->mddev = &rs->md; in parse_raid_params()
1266 jdev->bdev = rs->journal_dev.dev->bdev; in parse_raid_params()
1269 rs->ti->error = "No space for raid4/5/6 journal"; in parse_raid_params()
1272 rs->journal_dev.mode = R5C_JOURNAL_MODE_WRITE_THROUGH; in parse_raid_params()
1281 if (!test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) { in parse_raid_params()
1282 rs->ti->error = "raid4/5/6 'journal_mode' is invalid without 'journal_dev'"; in parse_raid_params()
1285 if (test_and_set_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) { in parse_raid_params()
1286 rs->ti->error = "Only one raid4/5/6 'journal_mode' argument allowed"; in parse_raid_params()
1291 rs->ti->error = "Invalid 'journal_mode' argument"; in parse_raid_params()
1294 rs->journal_dev.mode = r; in parse_raid_params()
1302 rs->ti->error = "Bad numerical argument given in raid params"; in parse_raid_params()
1312 if (!__within_range(value, 0, rs->raid_disks - 1)) { in parse_raid_params()
1313 rs->ti->error = "Invalid rebuild index given"; in parse_raid_params()
1317 if (test_and_set_bit(value, (void *) rs->rebuild_disks)) { in parse_raid_params()
1318 rs->ti->error = "rebuild for this index already given"; in parse_raid_params()
1322 rd = rs->dev + value; in parse_raid_params()
1326 set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags); in parse_raid_params()
1329 rs->ti->error = "write_mostly option is only valid for RAID1"; in parse_raid_params()
1333 if (!__within_range(value, 0, rs->md.raid_disks - 1)) { in parse_raid_params()
1334 rs->ti->error = "Invalid write_mostly index given"; in parse_raid_params()
1339 set_bit(WriteMostly, &rs->dev[value].rdev.flags); in parse_raid_params()
1340 set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags); in parse_raid_params()
1343 rs->ti->error = "max_write_behind option is only valid for RAID1"; in parse_raid_params()
1347 if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) { in parse_raid_params()
1348 rs->ti->error = "Only one max_write_behind argument pair allowed"; in parse_raid_params()
1357 rs->ti->error = "Max write-behind limit out of range"; in parse_raid_params()
1361 rs->md.bitmap_info.max_write_behind = value / 2; in parse_raid_params()
1363 if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) { in parse_raid_params()
1364 rs->ti->error = "Only one daemon_sleep argument pair allowed"; in parse_raid_params()
1368 rs->ti->error = "daemon sleep period out of range"; in parse_raid_params()
1371 rs->md.bitmap_info.daemon_sleep = value; in parse_raid_params()
1374 if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) { in parse_raid_params()
1375 rs->ti->error = "Only one data_offset argument pair allowed"; in parse_raid_params()
1381 rs->ti->error = "Bogus data_offset value"; in parse_raid_params()
1384 rs->data_offset = value; in parse_raid_params()
1387 if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) { in parse_raid_params()
1388 rs->ti->error = "Only one delta_disks argument pair allowed"; in parse_raid_params()
1393 rs->ti->error = "Too many delta_disk requested"; in parse_raid_params()
1397 rs->delta_disks = value; in parse_raid_params()
1399 if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) { in parse_raid_params()
1400 rs->ti->error = "Only one stripe_cache argument pair allowed"; in parse_raid_params()
1405 rs->ti->error = "Inappropriate argument: stripe_cache"; in parse_raid_params()
1410 rs->ti->error = "Bogus stripe cache entries value"; in parse_raid_params()
1413 rs->stripe_cache_entries = value; in parse_raid_params()
1415 if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) { in parse_raid_params()
1416 rs->ti->error = "Only one min_recovery_rate argument pair allowed"; in parse_raid_params()
1421 rs->ti->error = "min_recovery_rate out of range"; in parse_raid_params()
1424 rs->md.sync_speed_min = value; in parse_raid_params()
1426 if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) { in parse_raid_params()
1427 rs->ti->error = "Only one max_recovery_rate argument pair allowed"; in parse_raid_params()
1432 rs->ti->error = "max_recovery_rate out of range"; in parse_raid_params()
1435 rs->md.sync_speed_max = value; in parse_raid_params()
1437 if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) { in parse_raid_params()
1438 rs->ti->error = "Only one region_size argument pair allowed"; in parse_raid_params()
1443 rs->requested_bitmap_chunk_sectors = value; in parse_raid_params()
1445 if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) { in parse_raid_params()
1446 rs->ti->error = "Only one raid10_copies argument pair allowed"; in parse_raid_params()
1450 if (!__within_range(value, 2, rs->md.raid_disks)) { in parse_raid_params()
1451 rs->ti->error = "Bad value for 'raid10_copies'"; in parse_raid_params()
1458 rs->ti->error = "Unable to parse RAID parameter"; in parse_raid_params()
1463 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) && in parse_raid_params()
1464 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) { in parse_raid_params()
1465 rs->ti->error = "sync and nosync are mutually exclusive"; in parse_raid_params()
1469 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && in parse_raid_params()
1470 (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) || in parse_raid_params()
1471 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))) { in parse_raid_params()
1472 rs->ti->error = "sync/nosync and rebuild are mutually exclusive"; in parse_raid_params()
1476 if (write_mostly >= rs->md.raid_disks) { in parse_raid_params()
1477 rs->ti->error = "Can't set all raid1 devices to write_mostly"; in parse_raid_params()
1481 if (rs->md.sync_speed_max && in parse_raid_params()
1482 rs->md.sync_speed_min > rs->md.sync_speed_max) { in parse_raid_params()
1483 rs->ti->error = "Bogus recovery rates"; in parse_raid_params()
1487 if (validate_region_size(rs, region_size)) in parse_raid_params()
1490 if (rs->md.chunk_sectors) in parse_raid_params()
1491 max_io_len = rs->md.chunk_sectors; in parse_raid_params()
1495 if (dm_set_target_max_io_len(rs->ti, max_io_len)) in parse_raid_params()
1499 if (raid10_copies > rs->md.raid_disks) { in parse_raid_params()
1500 rs->ti->error = "Not enough devices to satisfy specification"; in parse_raid_params()
1504 rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies); in parse_raid_params()
1505 if (rs->md.new_layout < 0) { in parse_raid_params()
1506 rs->ti->error = "Error getting raid10 format"; in parse_raid_params()
1507 return rs->md.new_layout; in parse_raid_params()
1510 rt = get_raid_type_by_ll(10, rs->md.new_layout); in parse_raid_params()
1512 rs->ti->error = "Failed to recognize new raid10 layout"; in parse_raid_params()
1518 test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) { in parse_raid_params()
1519 rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible"; in parse_raid_params()
1524 rs->raid10_copies = raid10_copies; in parse_raid_params()
1527 rs->md.persistent = 0; in parse_raid_params()
1528 rs->md.external = 1; in parse_raid_params()
1531 return rs_check_for_valid_flags(rs); in parse_raid_params()
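
Optional parameters follow the mandatory chunk size, and <#raid_params> counts every word after the raid type, which is why each keyword/value pair is checked with test_and_set_bit() to reject duplicates. An illustrative constructor line with optional parameters, following the documented format (device numbers hypothetical):

    # chunk 2048 sectors, forced sync, min recovery rate 20 KiB/s/disk;
    # "4" counts the words: 2048 sync min_recovery_rate 20
    0 1960893648 raid \
            raid4 4 2048 sync min_recovery_rate 20 \
            5 8:17 8:18 8:33 8:34 8:49 8:50 8:65 8:66 8:81 8:82
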
1535 static int rs_set_raid456_stripe_cache(struct raid_set *rs) in rs_set_raid456_stripe_cache() argument
1539 struct mddev *mddev = &rs->md; in rs_set_raid456_stripe_cache()
1541 uint32_t nr_stripes = rs->stripe_cache_entries; in rs_set_raid456_stripe_cache()
1543 if (!rt_is_raid456(rs->raid_type)) { in rs_set_raid456_stripe_cache()
1544 rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size"; in rs_set_raid456_stripe_cache()
1556 rs->ti->error = "Cannot change stripe_cache size on inactive RAID set"; in rs_set_raid456_stripe_cache()
1564 rs->ti->error = "Failed to set raid4/5/6 stripe cache size"; in rs_set_raid456_stripe_cache()
1575 static unsigned int mddev_data_stripes(struct raid_set *rs) in mddev_data_stripes() argument
1577 return rs->md.raid_disks - rs->raid_type->parity_devs; in mddev_data_stripes()
1581 static unsigned int rs_data_stripes(struct raid_set *rs) in rs_data_stripes() argument
1583 return rs->raid_disks - rs->raid_type->parity_devs; in rs_data_stripes()
1590 static sector_t __rdev_sectors(struct raid_set *rs) in __rdev_sectors() argument
1594 for (i = 0; i < rs->raid_disks; i++) { in __rdev_sectors()
1595 struct md_rdev *rdev = &rs->dev[i].rdev; in __rdev_sectors()
1606 static int _check_data_dev_sectors(struct raid_set *rs) in _check_data_dev_sectors() argument
1611 rdev_for_each(rdev, &rs->md) in _check_data_dev_sectors()
1614 if (ds < rs->md.dev_sectors) { in _check_data_dev_sectors()
1615 rs->ti->error = "Component device(s) too small"; in _check_data_dev_sectors()
1624 static int rs_set_dev_and_array_sectors(struct raid_set *rs, sector_t sectors, bool use_mddev) in rs_set_dev_and_array_sectors() argument
1629 struct mddev *mddev = &rs->md; in rs_set_dev_and_array_sectors()
1633 data_stripes = mddev_data_stripes(rs); in rs_set_dev_and_array_sectors()
1635 delta_disks = rs->delta_disks; in rs_set_dev_and_array_sectors()
1636 data_stripes = rs_data_stripes(rs); in rs_set_dev_and_array_sectors()
1640 if (rt_is_raid1(rs->raid_type)) in rs_set_dev_and_array_sectors()
1642 else if (rt_is_raid10(rs->raid_type)) { in rs_set_dev_and_array_sectors()
1643 if (rs->raid10_copies < 2 || in rs_set_dev_and_array_sectors()
1645 rs->ti->error = "Bogus raid10 data copies or delta disks"; in rs_set_dev_and_array_sectors()
1649 dev_sectors *= rs->raid10_copies; in rs_set_dev_and_array_sectors()
1654 if (sector_div(array_sectors, rs->raid10_copies)) in rs_set_dev_and_array_sectors()
1666 rs_set_rdev_sectors(rs); in rs_set_dev_and_array_sectors()
1668 return _check_data_dev_sectors(rs); in rs_set_dev_and_array_sectors()
1670 rs->ti->error = "Target length not divisible by number of data devices"; in rs_set_dev_and_array_sectors()
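
rs_set_dev_and_array_sectors() divides the target length across the data stripes, so the length must divide evenly; raid1 keeps dev_sectors equal to the target length, and raid10 scales by the copy count. A worked example of the striped case, consistent with the "Target length not divisible" error above:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t ti_len = 3000;              /* target length, sectors */
        int raid_disks = 4, parity_devs = 1; /* raid5-style: one parity */
        int data_stripes = raid_disks - parity_devs;        /* 3 */

        assert(ti_len % data_stripes == 0);                 /* else -EINVAL */
        uint64_t dev_sectors = ti_len / data_stripes;       /* 1000 per leg */
        uint64_t array_sectors = (uint64_t)data_stripes * dev_sectors;

        assert(array_sectors == ti_len);
        return 0;
    }
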
1675 static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors) in rs_setup_recovery() argument
1678 if (rs_is_raid0(rs)) in rs_setup_recovery()
1679 rs->md.recovery_cp = MaxSector; in rs_setup_recovery()
1685 else if (rs_is_raid6(rs)) in rs_setup_recovery()
1686 rs->md.recovery_cp = dev_sectors; in rs_setup_recovery()
1692 rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) in rs_setup_recovery()
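
The recovery checkpoint doubles as the sync policy: everything below recovery_cp is considered in sync, so MaxSector declares the set clean and 0 forces a full resynchronization; raid6 can never skip recovery because parity and Q-syndrome must be made consistent. A sketch of the branch structure, mirroring the lines above:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_SECTOR UINT64_MAX

    static uint64_t setup_recovery_cp(int level, bool nosync, uint64_t dev_sectors)
    {
        if (level == 0)
            return MAX_SECTOR;      /* raid0 does not recover */
        if (level == 6)
            return dev_sectors;     /* raid6: always rebuild P + Q */
        return nosync ? MAX_SECTOR : dev_sectors;
    }
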
1698 struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); in do_table_event() local
1701 if (!rs_is_reshaping(rs)) { in do_table_event()
1702 if (rs_is_raid10(rs)) in do_table_event()
1703 rs_set_rdev_sectors(rs); in do_table_event()
1704 rs_set_capacity(rs); in do_table_event()
1706 dm_table_event(rs->ti->table); in do_table_event()
1715 static int rs_check_takeover(struct raid_set *rs) in rs_check_takeover() argument
1717 struct mddev *mddev = &rs->md; in rs_check_takeover()
1720 if (rs->md.degraded) { in rs_check_takeover()
1721 rs->ti->error = "Can't takeover degraded raid set"; in rs_check_takeover()
1725 if (rs_is_reshaping(rs)) { in rs_check_takeover()
1726 rs->ti->error = "Can't takeover reshaping raid set"; in rs_check_takeover()
1739 !(rs->raid_disks % mddev->raid_disks)) in rs_check_takeover()
1865 rs->ti->error = "takeover not possible"; in rs_check_takeover()
1870 static bool rs_takeover_requested(struct raid_set *rs) in rs_takeover_requested() argument
1872 return rs->md.new_level != rs->md.level; in rs_takeover_requested()
1876 static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev) in rs_is_layout_change() argument
1878 return (use_mddev ? rs->md.delta_disks : rs->delta_disks) || in rs_is_layout_change()
1879 rs->md.new_layout != rs->md.layout || in rs_is_layout_change()
1880 rs->md.new_chunk_sectors != rs->md.chunk_sectors; in rs_is_layout_change()
1884 static bool rs_reshape_requested(struct raid_set *rs) in rs_reshape_requested() argument
1887 struct mddev *mddev = &rs->md; in rs_reshape_requested()
1889 if (rs_takeover_requested(rs)) in rs_reshape_requested()
1892 if (rs_is_raid0(rs)) in rs_reshape_requested()
1895 change = rs_is_layout_change(rs, false); in rs_reshape_requested()
1898 if (rs_is_raid1(rs)) { in rs_reshape_requested()
1899 if (rs->delta_disks) in rs_reshape_requested()
1900 return !!rs->delta_disks; in rs_reshape_requested()
1903 mddev->raid_disks != rs->raid_disks; in rs_reshape_requested()
1906 if (rs_is_raid10(rs)) in rs_reshape_requested()
1909 rs->delta_disks >= 0; in rs_reshape_requested()
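
The split between the two requests is simple: a level change is a takeover, while a change of layout, chunk size, or disk count at the same level is a reshape. A minimal model of the two tests:

    struct model_change {
        int level, new_level;
        int layout, new_layout;
        int chunk, new_chunk;
        int delta_disks;
    };

    static int takeover_requested(const struct model_change *c)
    {
        return c->new_level != c->level;
    }

    static int layout_change(const struct model_change *c)
    {
        return c->delta_disks ||
               c->new_layout != c->layout ||
               c->new_chunk != c->chunk;
    }
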
2013 static int rs_check_reshape(struct raid_set *rs) in rs_check_reshape() argument
2015 struct mddev *mddev = &rs->md; in rs_check_reshape()
2018 rs->ti->error = "Reshape not supported"; in rs_check_reshape()
2020 rs->ti->error = "Can't reshape degraded raid set"; in rs_check_reshape()
2021 else if (rs_is_recovering(rs)) in rs_check_reshape()
2022 rs->ti->error = "Convert request on recovering raid set prohibited"; in rs_check_reshape()
2023 else if (rs_is_reshaping(rs)) in rs_check_reshape()
2024 rs->ti->error = "raid set already reshaping!"; in rs_check_reshape()
2025 else if (!(rs_is_raid1(rs) || rs_is_raid10(rs) || rs_is_raid456(rs))) in rs_check_reshape()
2026 rs->ti->error = "Reshaping only supported for raid1/4/5/6/10"; in rs_check_reshape()
2088 struct raid_set *rs = container_of(mddev, struct raid_set, md); in super_sync() local
2100 for (i = 0; i < rs->raid_disks; i++) in super_sync()
2101 if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) { in super_sync()
2209 static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) in super_init_validation() argument
2212 struct mddev *mddev = &rs->md; in super_init_validation()
2249 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) { in super_init_validation()
2261 rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout); in super_init_validation()
2271 if (rs_takeover_requested(rs)) { in super_init_validation()
2278 } else if (rs_reshape_requested(rs)) { in super_init_validation()
2291 if (rs->delta_disks) in super_init_validation()
2293 mddev->raid_disks, mddev->raid_disks + rs->delta_disks); in super_init_validation()
2294 if (rs_is_raid10(rs)) { in super_init_validation()
2308 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) in super_init_validation()
2343 if (new_devs == rs->raid_disks || !rebuilds) { in super_init_validation()
2345 if (new_devs == rs->raid_disks) { in super_init_validation()
2349 new_devs != rs->delta_disks) { in super_init_validation()
2364 } else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) { in super_init_validation()
2368 } else if (rs_is_reshaping(rs)) { in super_init_validation()
2397 if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) { in super_init_validation()
2399 rs->raid_disks % rs->raid10_copies) { in super_init_validation()
2400 rs->ti->error = in super_init_validation()
2407 } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) && in super_init_validation()
2408 !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) && in super_init_validation()
2409 !rt_is_raid1(rs->raid_type)) { in super_init_validation()
2410 rs->ti->error = "Cannot change device positions in raid set"; in super_init_validation()
2429 static int super_validate(struct raid_set *rs, struct md_rdev *rdev) in super_validate() argument
2431 struct mddev *mddev = &rs->md; in super_validate()
2434 if (rs_is_raid0(rs) || !rdev->sb_page || rdev->raid_disk < 0) in super_validate()
2443 if (!mddev->events && super_init_validation(rs, rdev)) in super_validate()
2448 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags"; in super_validate()
2453 rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet"; in super_validate()
2458 …mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(40… in super_validate()
2477 else if (!rs_is_reshaping(rs)) in super_validate()
2500 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) in analyse_superblocks() argument
2504 struct mddev *mddev = &rs->md; in analyse_superblocks()
2531 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) in analyse_superblocks()
2548 if (rs_is_raid0(rs)) in analyse_superblocks()
2571 rs->ti->error = "Unable to assemble array: Invalid superblocks"; in analyse_superblocks()
2572 if (super_validate(rs, freshest)) in analyse_superblocks()
2575 if (validate_raid_redundancy(rs)) { in analyse_superblocks()
2576 rs->ti->error = "Insufficient redundancy to activate array"; in analyse_superblocks()
2583 super_validate(rs, rdev)) in analyse_superblocks()
2596 static int rs_adjust_data_offsets(struct raid_set *rs) in rs_adjust_data_offsets() argument
2602 if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) { in rs_adjust_data_offsets()
2603 if (!rs_is_reshapable(rs)) in rs_adjust_data_offsets()
2610 rdev = &rs->dev[0].rdev; in rs_adjust_data_offsets()
2612 if (rs->delta_disks < 0) { in rs_adjust_data_offsets()
2622 new_data_offset = rs->data_offset; in rs_adjust_data_offsets()
2624 } else if (rs->delta_disks > 0) { in rs_adjust_data_offsets()
2633 data_offset = rs->data_offset; in rs_adjust_data_offsets()
2655 data_offset = rs->data_offset ? rdev->data_offset : 0; in rs_adjust_data_offsets()
2656 new_data_offset = data_offset ? 0 : rs->data_offset; in rs_adjust_data_offsets()
2657 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in rs_adjust_data_offsets()
2663 if (rs->data_offset && in rs_adjust_data_offsets()
2664 bdev_nr_sectors(rdev->bdev) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) { in rs_adjust_data_offsets()
2665 rs->ti->error = data_offset ? "No space for forward reshape" : in rs_adjust_data_offsets()
2674 if (rs->md.recovery_cp < rs->md.dev_sectors) in rs_adjust_data_offsets()
2675 rs->md.recovery_cp += rs->dev[0].rdev.data_offset; in rs_adjust_data_offsets()
2678 rdev_for_each(rdev, &rs->md) { in rs_adjust_data_offsets()
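
Out-of-place reshapes work by flipping each rdev's data offset between zero and the reserved head space, so stripes can be rewritten without overwriting themselves; after the flip the superblocks must be updated (RT_FLAG_UPDATE_SBS) and the recovery checkpoint shifted by the new offset. A sketch of the flip, assuming the reserved space has already been validated against MIN_FREE_RESHAPE_SPACE:

    #include <stdint.h>

    struct model_offsets { uint64_t data_offset, new_data_offset; };

    static struct model_offsets plan_offsets(uint64_t cur, uint64_t reserved)
    {
        struct model_offsets o;

        o.data_offset = cur;                        /* where data lives now */
        o.new_data_offset = cur ? 0 : reserved;     /* flip to the other slot */
        return o;
    }
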
2689 static void __reorder_raid_disk_indexes(struct raid_set *rs) in __reorder_raid_disk_indexes() argument
2694 rdev_for_each(rdev, &rs->md) { in __reorder_raid_disk_indexes()
2705 static int rs_setup_takeover(struct raid_set *rs) in rs_setup_takeover() argument
2707 struct mddev *mddev = &rs->md; in rs_setup_takeover()
2709 unsigned int d = mddev->raid_disks = rs->raid_disks; in rs_setup_takeover()
2710 sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset; in rs_setup_takeover()
2712 if (rt_is_raid10(rs->raid_type)) { in rs_setup_takeover()
2713 if (rs_is_raid0(rs)) { in rs_setup_takeover()
2715 __reorder_raid_disk_indexes(rs); in rs_setup_takeover()
2718 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR, in rs_setup_takeover()
2719 rs->raid10_copies); in rs_setup_takeover()
2720 } else if (rs_is_raid1(rs)) in rs_setup_takeover()
2722 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, in rs_setup_takeover()
2723 rs->raid_disks); in rs_setup_takeover()
2733 rdev = &rs->dev[d].rdev; in rs_setup_takeover()
2735 if (test_bit(d, (void *) rs->rebuild_disks)) { in rs_setup_takeover()
2750 static int rs_prepare_reshape(struct raid_set *rs) in rs_prepare_reshape() argument
2753 struct mddev *mddev = &rs->md; in rs_prepare_reshape()
2755 if (rs_is_raid10(rs)) { in rs_prepare_reshape()
2756 if (rs->raid_disks != mddev->raid_disks && in rs_prepare_reshape()
2758 rs->raid10_copies && in rs_prepare_reshape()
2759 rs->raid10_copies != __raid10_near_copies(mddev->layout)) { in rs_prepare_reshape()
2766 if (rs->raid_disks % rs->raid10_copies) { in rs_prepare_reshape()
2767 rs->ti->error = "Can't reshape raid10 mirror groups"; in rs_prepare_reshape()
2772 __reorder_raid_disk_indexes(rs); in rs_prepare_reshape()
2773 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, in rs_prepare_reshape()
2774 rs->raid10_copies); in rs_prepare_reshape()
2780 } else if (rs_is_raid456(rs)) in rs_prepare_reshape()
2783 else if (rs_is_raid1(rs)) { in rs_prepare_reshape()
2784 if (rs->delta_disks) { in rs_prepare_reshape()
2786 mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks; in rs_prepare_reshape()
2790 mddev->raid_disks = rs->raid_disks; in rs_prepare_reshape()
2794 rs->ti->error = "Called with bogus raid type"; in rs_prepare_reshape()
2799 set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags); in rs_prepare_reshape()
2800 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in rs_prepare_reshape()
2801 } else if (mddev->raid_disks < rs->raid_disks) in rs_prepare_reshape()
2803 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in rs_prepare_reshape()
2809 static sector_t _get_reshape_sectors(struct raid_set *rs) in _get_reshape_sectors() argument
2814 rdev_for_each(rdev, &rs->md) in _get_reshape_sectors()
2822 return max(reshape_sectors, (sector_t) rs->data_offset); in _get_reshape_sectors()
2832 static int rs_setup_reshape(struct raid_set *rs) in rs_setup_reshape() argument
2836 sector_t reshape_sectors = _get_reshape_sectors(rs); in rs_setup_reshape()
2837 struct mddev *mddev = &rs->md; in rs_setup_reshape()
2840 mddev->delta_disks = rs->delta_disks; in rs_setup_reshape()
2846 DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks); in rs_setup_reshape()
2873 if (rs->delta_disks > 0) { in rs_setup_reshape()
2875 for (d = cur_raid_devs; d < rs->raid_disks; d++) { in rs_setup_reshape()
2876 rdev = &rs->dev[d].rdev; in rs_setup_reshape()
2887 rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector; in rs_setup_reshape()
2893 } else if (rs->delta_disks < 0) { in rs_setup_reshape()
2894 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, true); in rs_setup_reshape()
2920 mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1; in rs_setup_reshape()
2928 rdev_for_each(rdev, &rs->md) in rs_setup_reshape()
2940 static void rs_reset_inconclusive_reshape(struct raid_set *rs) in rs_reset_inconclusive_reshape() argument
2942 if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) { in rs_reset_inconclusive_reshape()
2943 rs_set_cur(rs); in rs_reset_inconclusive_reshape()
2944 rs->md.delta_disks = 0; in rs_reset_inconclusive_reshape()
2945 rs->md.reshape_backwards = 0; in rs_reset_inconclusive_reshape()
2953 static void configure_discard_support(struct raid_set *rs) in configure_discard_support() argument
2957 struct dm_target *ti = rs->ti; in configure_discard_support()
2962 raid456 = rs_is_raid456(rs); in configure_discard_support()
2964 for (i = 0; i < rs->raid_disks; i++) { in configure_discard_support()
2965 if (!rs->dev[i].rdev.bdev || in configure_discard_support()
2966 !bdev_max_discard_sectors(rs->dev[i].rdev.bdev)) in configure_discard_support()
3001 struct raid_set *rs = NULL; in raid_ctr() local
3038 rs = raid_set_alloc(ti, rt, num_raid_devs); in raid_ctr()
3039 if (IS_ERR(rs)) in raid_ctr()
3040 return PTR_ERR(rs); in raid_ctr()
3042 r = parse_raid_params(rs, &as, num_raid_params); in raid_ctr()
3046 r = parse_dev_params(rs, &as); in raid_ctr()
3050 rs->md.sync_super = super_sync; in raid_ctr()
3058 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false); in raid_ctr()
3063 rs->array_sectors = rs->md.array_sectors; in raid_ctr()
3064 rs->dev_sectors = rs->md.dev_sectors; in raid_ctr()
3071 rs_config_backup(rs, &rs_layout); in raid_ctr()
3073 r = analyse_superblocks(ti, rs); in raid_ctr()
3078 sb_array_sectors = rs->md.array_sectors; in raid_ctr()
3079 rdev_sectors = __rdev_sectors(rs); in raid_ctr()
3087 reshape_sectors = _get_reshape_sectors(rs); in raid_ctr()
3088 if (rs->dev_sectors != rdev_sectors) { in raid_ctr()
3089 resize = (rs->dev_sectors != rdev_sectors - reshape_sectors); in raid_ctr()
3090 if (rs->dev_sectors > rdev_sectors - reshape_sectors) in raid_ctr()
3091 set_bit(RT_FLAG_RS_GROW, &rs->runtime_flags); in raid_ctr()
3094 INIT_WORK(&rs->md.event_work, do_table_event); in raid_ctr()
3095 ti->private = rs; in raid_ctr()
3100 rs_config_restore(rs, &rs_layout); in raid_ctr()
3108 if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) { in raid_ctr()
3110 if (rs_is_raid6(rs) && in raid_ctr()
3111 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) { in raid_ctr()
3116 rs_setup_recovery(rs, 0); in raid_ctr()
3117 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in raid_ctr()
3118 rs_set_new(rs); in raid_ctr()
3119 } else if (rs_is_recovering(rs)) { in raid_ctr()
3122 } else if (rs_is_reshaping(rs)) { in raid_ctr()
3130 } else if (rs_takeover_requested(rs)) { in raid_ctr()
3131 if (rs_is_reshaping(rs)) { in raid_ctr()
3138 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) { in raid_ctr()
3152 r = rs_check_takeover(rs); in raid_ctr()
3156 r = rs_setup_takeover(rs); in raid_ctr()
3160 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in raid_ctr()
3162 rs_setup_recovery(rs, MaxSector); in raid_ctr()
3163 rs_set_new(rs); in raid_ctr()
3164 } else if (rs_reshape_requested(rs)) { in raid_ctr()
3166 clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags); in raid_ctr()
3174 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) { in raid_ctr()
3181 if (reshape_sectors || rs_is_raid1(rs)) { in raid_ctr()
3189 r = rs_prepare_reshape(rs); in raid_ctr()
3194 rs_setup_recovery(rs, MaxSector); in raid_ctr()
3196 rs_set_cur(rs); in raid_ctr()
3200 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) { in raid_ctr()
3201 clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags); in raid_ctr()
3202 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in raid_ctr()
3203 rs_setup_recovery(rs, MaxSector); in raid_ctr()
3204 } else if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) { in raid_ctr()
3209 r = rs_set_dev_and_array_sectors(rs, sb_array_sectors, false); in raid_ctr()
3213 …rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_se… in raid_ctr()
3216 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false); in raid_ctr()
3220 if (sb_array_sectors > rs->array_sectors) in raid_ctr()
3221 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); in raid_ctr()
3223 rs_set_cur(rs); in raid_ctr()
3227 r = rs_adjust_data_offsets(rs); in raid_ctr()
3232 rs_reset_inconclusive_reshape(rs); in raid_ctr()
3235 rs->md.ro = 1; in raid_ctr()
3236 rs->md.in_sync = 1; in raid_ctr()
3239 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); in raid_ctr()
3242 mddev_lock_nointr(&rs->md); in raid_ctr()
3243 r = md_run(&rs->md); in raid_ctr()
3244 rs->md.in_sync = 0; /* Assume already marked dirty */ in raid_ctr()
3247 mddev_unlock(&rs->md); in raid_ctr()
3251 r = md_start(&rs->md); in raid_ctr()
3258 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) { in raid_ctr()
3259 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode); in raid_ctr()
3266 mddev_suspend(&rs->md); in raid_ctr()
3267 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags); in raid_ctr()
3270 if (rs_is_raid456(rs)) { in raid_ctr()
3271 r = rs_set_raid456_stripe_cache(rs); in raid_ctr()
3277 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { in raid_ctr()
3278 r = rs_check_reshape(rs); in raid_ctr()
3283 rs_config_restore(rs, &rs_layout); in raid_ctr()
3285 if (rs->md.pers->start_reshape) { in raid_ctr()
3286 r = rs->md.pers->check_reshape(&rs->md); in raid_ctr()
3295 configure_discard_support(rs); in raid_ctr()
3297 mddev_unlock(&rs->md); in raid_ctr()
3301 md_stop(&rs->md); in raid_ctr()
3302 mddev_unlock(&rs->md); in raid_ctr()
3304 raid_set_free(rs); in raid_ctr()
3311 struct raid_set *rs = ti->private; in raid_dtr() local
3313 mddev_lock_nointr(&rs->md); in raid_dtr()
3314 md_stop(&rs->md); in raid_dtr()
3315 mddev_unlock(&rs->md); in raid_dtr()
3316 raid_set_free(rs); in raid_dtr()
3321 struct raid_set *rs = ti->private; in raid_map() local
3322 struct mddev *mddev = &rs->md; in raid_map()
3399 static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev) in __raid_dev_status() argument
3406 return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ? "A" : "a"; in __raid_dev_status()
3407 else if (test_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags) || in __raid_dev_status()
3408 (!test_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags) && in __raid_dev_status()
3416 static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, in rs_get_progress() argument
3420 struct mddev *mddev = &rs->md; in rs_get_progress()
3422 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); in rs_get_progress()
3423 clear_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); in rs_get_progress()
3425 if (rs_is_raid0(rs)) { in rs_get_progress()
3427 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); in rs_get_progress()
3441 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); in rs_get_progress()
3458 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); in rs_get_progress()
3466 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); in rs_get_progress()
3473 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); in rs_get_progress()
3484 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); in rs_get_progress()
3488 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); in rs_get_progress()
3506 struct raid_set *rs = ti->private; in raid_status() local
3507 struct mddev *mddev = &rs->md; in raid_status()
3508 struct r5conf *conf = rs_is_raid456(rs) ? mddev->private : NULL; in raid_status()
3530 resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ? in raid_status()
3532 recovery = rs->md.recovery; in raid_status()
3534 progress = rs_get_progress(rs, recovery, state, resync_max_sectors); in raid_status()
3539 for (i = 0; i < rs->raid_disks; i++) in raid_status()
3540 DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev)); in raid_status()
3585 DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset); in raid_status()
3590 DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ? in raid_status()
3591 __raid_dev_status(rs, &rs->journal_dev.rdev) : "-"); in raid_status()
3601 for (i = 0; i < rs->raid_disks; i++) { in raid_status()
3602 rebuild_writemostly_count += (test_bit(i, (void *) rs->rebuild_disks) ? 2 : 0) + in raid_status()
3603 (test_bit(WriteMostly, &rs->dev[i].rdev.flags) ? 2 : 0); in raid_status()
3605 rebuild_writemostly_count -= (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) ? 2 : 0) + in raid_status()
3606 (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags) ? 2 : 0); in raid_status()
3609 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) + in raid_status()
3610 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2; in raid_status()
3613 DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors); in raid_status()
3614 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) in raid_status()
3616 if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) in raid_status()
3618 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) in raid_status()
3619 for (i = 0; i < rs->raid_disks; i++) in raid_status()
3620 if (test_bit(i, (void *) rs->rebuild_disks)) in raid_status()
3622 if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) in raid_status()
3625 if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) in raid_status()
3628 if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) in raid_status()
3631 if (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags)) in raid_status()
3632 for (i = 0; i < rs->raid_disks; i++) in raid_status()
3633 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags)) in raid_status()
3635 rs->dev[i].rdev.raid_disk); in raid_status()
3636 if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) in raid_status()
3639 if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) in raid_status()
3642 if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) in raid_status()
3645 if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) in raid_status()
3648 if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) in raid_status()
3651 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) in raid_status()
3653 max(rs->delta_disks, mddev->delta_disks)); in raid_status()
3654 if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) in raid_status()
3656 (unsigned long long) rs->data_offset); in raid_status()
3657 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) in raid_status()
3659 __get_dev_name(rs->journal_dev.dev)); in raid_status()
3660 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) in raid_status()
3662 md_journal_mode_to_dm_raid(rs->journal_dev.mode)); in raid_status()
3663 DMEMIT(" %d", rs->raid_disks); in raid_status()
3664 for (i = 0; i < rs->raid_disks; i++) in raid_status()
3665 DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev), in raid_status()
3666 __get_dev_name(rs->dev[i].data_dev)); in raid_status()
3679 recovery = rs->md.recovery; in raid_status()
3683 for (i = 0; i < rs->raid_disks; i++) { in raid_status()
3685 DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev)); in raid_status()
3690 switch (rs->journal_dev.mode) { in raid_status()
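
raid_status() emits two formats: STATUSTYPE_INFO summarizes health (one character per device, sync ratio, sync action, mismatch count, data offset, journal state), while STATUSTYPE_TABLE echoes the constructor parameters, with the rebuild/write_mostly bookkeeping above correcting the parameter count for per-disk arguments. An INFO line might look like this, per the documented field order (values hypothetical):

    raid4 5 AAAAA 2/490221568 init 0 0 -
    # <type> <#devs> <health> <sync ratio> <sync action> <mismatches> <data_offset> <journal>
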
3712 struct raid_set *rs = ti->private; in raid_message() local
3713 struct mddev *mddev = &rs->md; in raid_message()
3763 struct raid_set *rs = ti->private; in raid_iterate_devices() local
3767 for (i = 0; !r && i < rs->raid_disks; i++) { in raid_iterate_devices()
3768 if (rs->dev[i].data_dev) { in raid_iterate_devices()
3769 r = fn(ti, rs->dev[i].data_dev, in raid_iterate_devices()
3771 rs->md.dev_sectors, data); in raid_iterate_devices()
3780 struct raid_set *rs = ti->private; in raid_io_hints() local
3781 unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors); in raid_io_hints()
3784 blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs)); in raid_io_hints()
3789 struct raid_set *rs = ti->private; in raid_postsuspend() local
3791 if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) { in raid_postsuspend()
3793 if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery)) in raid_postsuspend()
3794 md_stop_writes(&rs->md); in raid_postsuspend()
3796 mddev_lock_nointr(&rs->md); in raid_postsuspend()
3797 mddev_suspend(&rs->md); in raid_postsuspend()
3798 mddev_unlock(&rs->md); in raid_postsuspend()
3802 static void attempt_restore_of_faulty_devices(struct raid_set *rs) in attempt_restore_of_faulty_devices() argument
3809 struct mddev *mddev = &rs->md; in attempt_restore_of_faulty_devices()
3818 for (i = 0; i < rs->raid_disks; i++) { in attempt_restore_of_faulty_devices()
3819 r = &rs->dev[i].rdev; in attempt_restore_of_faulty_devices()
3828 rs->raid_type->name, i); in attempt_restore_of_faulty_devices()
3870 rdev_for_each(r, &rs->md) { in attempt_restore_of_faulty_devices()
3885 static int __load_dirty_region_bitmap(struct raid_set *rs) in __load_dirty_region_bitmap() argument
3890 if (!rs_is_raid0(rs) && in __load_dirty_region_bitmap()
3891 !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) { in __load_dirty_region_bitmap()
3892 r = md_bitmap_load(&rs->md); in __load_dirty_region_bitmap()
3901 static void rs_update_sbs(struct raid_set *rs) in rs_update_sbs() argument
3903 struct mddev *mddev = &rs->md; in rs_update_sbs()
3919 static int rs_start_reshape(struct raid_set *rs) in rs_start_reshape() argument
3922 struct mddev *mddev = &rs->md; in rs_start_reshape()
3928 r = rs_setup_reshape(rs); in rs_start_reshape()
3939 rs->ti->error = "pers->check_reshape() failed"; in rs_start_reshape()
3950 rs->ti->error = "pers->start_reshape() failed"; in rs_start_reshape()
3960 rs_update_sbs(rs); in rs_start_reshape()
3968 struct raid_set *rs = ti->private; in raid_preresume() local
3969 struct mddev *mddev = &rs->md; in raid_preresume()
3972 if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags)) in raid_preresume()
3981 if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags)) in raid_preresume()
3982 rs_update_sbs(rs); in raid_preresume()
3985 r = __load_dirty_region_bitmap(rs); in raid_preresume()
3990 if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) { in raid_preresume()
3991 mddev->array_sectors = rs->array_sectors; in raid_preresume()
3992 mddev->dev_sectors = rs->dev_sectors; in raid_preresume()
3993 rs_set_rdev_sectors(rs); in raid_preresume()
3994 rs_set_capacity(rs); in raid_preresume()
3998 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap && in raid_preresume()
3999 (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) || in raid_preresume()
4000 (rs->requested_bitmap_chunk_sectors && in raid_preresume()
4001 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) { in raid_preresume()
4002 int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize; in raid_preresume()
4015 if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) in raid_preresume()
4020 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { in raid_preresume()
4022 rs_set_rdev_sectors(rs); in raid_preresume()
4024 r = rs_start_reshape(rs); in raid_preresume()
4036 struct raid_set *rs = ti->private; in raid_resume() local
4037 struct mddev *mddev = &rs->md; in raid_resume()
4039 if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) { in raid_resume()
4045 attempt_restore_of_faulty_devices(rs); in raid_resume()
4048 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) { in raid_resume()
4051 rs_set_capacity(rs); in raid_resume()