Searched refs:new_chunk_sectors (Results 1 – 7 of 7) sorted by relevance
drivers/md/dm-raid.c
  225   int new_chunk_sectors;                                                   member
  265   l->new_chunk_sectors = mddev->new_chunk_sectors;                         in rs_config_backup()
  274   mddev->new_chunk_sectors = l->new_chunk_sectors;                         in rs_config_restore()
  716   mddev->new_chunk_sectors = mddev->chunk_sectors;                         in rs_set_cur()
  729   mddev->chunk_sectors = mddev->new_chunk_sectors;                         in rs_set_new()
 1159   rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;                 in parse_raid_params()
 1539   uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;   in rs_set_raid456_stripe_cache()
 1879   rs->md.new_chunk_sectors != rs->md.chunk_sectors;                        in rs_is_layout_change()
 2130   sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);          in super_sync()
 2243   mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);          in super_init_validation()
 [all …]
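The dm-raid hits above show a save/restore pattern: rs_config_backup() copies the pending reshape geometry, including new_chunk_sectors, out of the mddev before it is reconfigured, and rs_config_restore() copies it back if the new configuration is not taken. Below is a minimal userspace sketch of that pattern; the struct and function names are illustrative stand-ins, not the kernel's.

    /*
     * Sketch of the backup/restore pattern seen in rs_config_backup() and
     * rs_config_restore(): the pending reshape geometry is copied aside
     * before the mddev is reconfigured and copied back on failure.
     * fake_mddev/saved_layout are stand-ins, not kernel structures.
     */
    #include <stdio.h>

    struct fake_mddev {
            int chunk_sectors;
            int new_chunk_sectors;
            int new_layout;
            int new_level;
    };

    struct saved_layout {
            int new_chunk_sectors;
            int new_layout;
            int new_level;
    };

    static void config_backup(struct saved_layout *l, const struct fake_mddev *m)
    {
            l->new_level = m->new_level;
            l->new_layout = m->new_layout;
            l->new_chunk_sectors = m->new_chunk_sectors;
    }

    static void config_restore(struct fake_mddev *m, const struct saved_layout *l)
    {
            m->new_level = l->new_level;
            m->new_layout = l->new_layout;
            m->new_chunk_sectors = l->new_chunk_sectors;
    }

    int main(void)
    {
            struct fake_mddev md = { .chunk_sectors = 128, .new_chunk_sectors = 128 };
            struct saved_layout save;

            config_backup(&save, &md);
            md.new_chunk_sectors = 256;      /* speculative new geometry */
            config_restore(&md, &save);      /* new table rejected: roll back */
            printf("new_chunk_sectors = %d\n", md.new_chunk_sectors);
            return 0;
    }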
drivers/md/raid0.c
  620   mddev->new_chunk_sectors = mddev->chunk_sectors;   in raid0_takeover_raid45()
  662   mddev->new_chunk_sectors = mddev->chunk_sectors;   in raid0_takeover_raid10()
  705   mddev->new_chunk_sectors = chunksect;              in raid0_takeover_raid1()
drivers/md/raid5.c
 7461   if (!mddev->new_chunk_sectors ||                                     in setup_conf()
 7462       (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||                   in setup_conf()
 7463       !is_power_of_2(mddev->new_chunk_sectors)) {                      in setup_conf()
 7465       mdname(mddev), mddev->new_chunk_sectors << 9);                   in setup_conf()
 7576   conf->chunk_sectors = mddev->new_chunk_sectors;                      in setup_conf()
 7634   ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4);    in setup_conf()
 7812   chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); in raid5_run()
 7835   abs(min_offset_diff) >= mddev->new_chunk_sectors)                    in raid5_run()
 7859   BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);            in raid5_run()
 8403   ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4      in check_stripe_cache()
 [all …]
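The setup_conf() lines quoted above are the validation of the reshape-target chunk: new_chunk_sectors must be non-zero, the chunk in bytes (sectors << 9) must be a multiple of PAGE_SIZE, and the sector count must be a power of two; the same value also feeds the minimum stripe-cache sizing, ((new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4. The following standalone restatement of that arithmetic assumes 4 KiB values for PAGE_SIZE and the stripe size purely for illustration.

    /*
     * Userspace restatement of the chunk checks quoted from setup_conf().
     * FAKE_PAGE_SIZE and FAKE_STRIPE_SIZE are assumed values, not the
     * kernel's configuration.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define FAKE_PAGE_SIZE   4096u
    #define FAKE_STRIPE_SIZE 4096u   /* stand-in for RAID5_STRIPE_SIZE(conf) */

    static bool is_power_of_2(unsigned int n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    static bool chunk_ok(unsigned int new_chunk_sectors)
    {
            if (!new_chunk_sectors ||
                ((unsigned long)new_chunk_sectors << 9) % FAKE_PAGE_SIZE ||
                !is_power_of_2(new_chunk_sectors))
                    return false;
            return true;
    }

    int main(void)
    {
            unsigned int sectors = 128;   /* 64 KiB chunk */

            printf("chunk of %u sectors valid: %d\n", sectors, chunk_ok(sectors));
            /* minimum stripe cache entries, as in setup_conf()/check_stripe_cache() */
            printf("min stripe cache entries: %lu\n",
                   (((unsigned long)sectors << 9) / FAKE_STRIPE_SIZE) * 4);
            return 0;
    }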
drivers/md/md.c
 1303   mddev->new_chunk_sectors = sb->new_chunk >> 9;           in super_90_validate()
 1311   mddev->new_chunk_sectors = mddev->chunk_sectors;          in super_90_validate()
 1447   sb->new_chunk = mddev->new_chunk_sectors << 9;            in super_90_sync()
 1839   mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);    in super_1_validate()
 1850   mddev->new_chunk_sectors = mddev->chunk_sectors;          in super_1_validate()
 2020   sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);    in super_1_sync()
 3976   mddev->new_chunk_sectors = mddev->chunk_sectors;          in level_store()
 3999   mddev->chunk_sectors = mddev->new_chunk_sectors;          in level_store()
 4201   mddev->chunk_sectors != mddev->new_chunk_sectors)         in chunk_size_show()
 4203   mddev->new_chunk_sectors << 9,                            in chunk_size_show()
 [all …]
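The md.c hits show the on-disk unit difference: the v0.90 superblock's new_chunk field holds bytes, hence the >> 9 / << 9 conversions in super_90_validate() and super_90_sync(), while the v1.x superblock holds sectors and only needs the endianness conversion; chunk_size_show() likewise reports the pending value in bytes (<< 9) when it differs from the current chunk size. A small sketch of just the unit handling follows; the structs are trimmed stand-ins, not the real superblock layouts, and the le32 conversion is omitted.

    /*
     * v0.90 stores the target chunk size in bytes, v1.x stores sectors.
     * sb_090/sb_1 are stand-in structs; cpu_to_le32()/le32_to_cpu() omitted.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct sb_090 { uint32_t new_chunk; };   /* bytes   */
    struct sb_1   { uint32_t new_chunk; };   /* sectors */

    int main(void)
    {
            int new_chunk_sectors = 1024;     /* 512 KiB target chunk */

            struct sb_090 sb90 = { .new_chunk = new_chunk_sectors << 9 };   /* as in super_90_sync() */
            struct sb_1   sb1  = { .new_chunk = new_chunk_sectors };        /* as in super_1_sync()  */

            /* super_90_validate(): convert bytes back to sectors */
            printf("v0.90: %u bytes on disk -> %u sectors in core\n",
                   sb90.new_chunk, sb90.new_chunk >> 9);
            /* super_1_validate(): already in sectors */
            printf("v1.x : %u sectors on disk\n", sb1.new_chunk);
            return 0;
    }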
drivers/md/md.h
  367   int new_chunk_sectors;   member (struct mddev)
drivers/md/raid1.c
 3270   if (mddev->chunk_sectors != mddev->new_chunk_sectors ||   in raid1_reshape()
 3273   mddev->new_chunk_sectors = mddev->chunk_sectors;           in raid1_reshape()
 3373   mddev->new_chunk_sectors = 0;                              in raid1_takeover()
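raid1 arrays have no data chunking, so raid1_reshape() only proceeds when the requested geometry does not change the chunk size, resetting the pending new_* values otherwise, and raid1_takeover() clears new_chunk_sectors to 0. A simplified sketch of that gate, using a trimmed stand-in for the mddev and a plain -EINVAL return:

    /*
     * Sketch of the gate seen in raid1_reshape(): a request that changes
     * the chunk size (or layout) is rejected and the pending geometry is
     * rolled back to the current one.  fake_mddev is a stand-in struct.
     */
    #include <errno.h>
    #include <stdio.h>

    struct fake_mddev {
            int chunk_sectors, new_chunk_sectors;
            int layout, new_layout;
    };

    static int raid1_reshape_check(struct fake_mddev *m)
    {
            if (m->chunk_sectors != m->new_chunk_sectors ||
                m->layout != m->new_layout) {
                    /* roll the pending geometry back before failing */
                    m->new_chunk_sectors = m->chunk_sectors;
                    m->new_layout = m->layout;
                    return -EINVAL;
            }
            return 0;
    }

    int main(void)
    {
            struct fake_mddev m = { .chunk_sectors = 0, .new_chunk_sectors = 128 };

            printf("reshape -> %d (new_chunk_sectors now %d)\n",
                   raid1_reshape_check(&m), m.new_chunk_sectors);
            return 0;
    }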
drivers/md/raid10.c
 3963   chunk = mddev->new_chunk_sectors;                  in setup_geo()
 3970   chunk = mddev->new_chunk_sectors;                  in setup_geo()
 4379   mddev->new_chunk_sectors = mddev->chunk_sectors;   in raid10_takeover_raid0()
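In raid10, setup_geo() builds separate descriptors for the old and the new geometry; both quoted lines read new_chunk_sectors because the new and the reshape-start geometries use the target chunk size, while the old geometry keeps chunk_sectors, and raid10_takeover_raid0() simply carries the existing chunk size over. A simplified sketch of that selection, with stand-in enum and struct names:

    /*
     * Sketch of the geometry selection visible in setup_geo(): the old
     * geometry uses the current chunk size, the new/reshape-start
     * geometries use the target chunk size.  Names are stand-ins.
     */
    #include <stdio.h>

    enum geo_type { GEO_OLD, GEO_NEW, GEO_START };

    struct fake_mddev { int chunk_sectors, new_chunk_sectors; };

    static int geo_chunk(const struct fake_mddev *m, enum geo_type which)
    {
            switch (which) {
            case GEO_OLD:
                    return m->chunk_sectors;        /* current layout */
            case GEO_NEW:
            case GEO_START:                         /* reshape target */
            default:
                    return m->new_chunk_sectors;
            }
    }

    int main(void)
    {
            struct fake_mddev m = { .chunk_sectors = 128, .new_chunk_sectors = 256 };

            printf("old=%d new=%d\n", geo_chunk(&m, GEO_OLD), geo_chunk(&m, GEO_NEW));
            return 0;
    }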