/linux-6.6.21/drivers/md/ |
D | raid0.c |
      84  sector_div(sectors, mddev->chunk_sectors);  in create_strip_zones()
      85  rdev1->sectors = sectors * mddev->chunk_sectors;  in create_strip_zones()
     131  if ((mddev->chunk_sectors << 9) % blksize) {  in create_strip_zones()
     134  mddev->chunk_sectors << 9, blksize);  in create_strip_zones()
     277  sector_div(first_sector, mddev->chunk_sectors);  in create_strip_zones()
     327  unsigned int chunk_sects = mddev->chunk_sectors;  in map_sector()
     363  ~(sector_t)(mddev->chunk_sectors-1));  in raid0_size()
     387  if (mddev->chunk_sectors == 0) {  in raid0_run()
     405  blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);  in raid0_run()
     406  blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);  in raid0_run()
     [all …]
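
The create_strip_zones() hits above (84-85) truncate each member device
to a whole number of chunks so every stripe is fully populated, and
raid0_size() (363) does the same rounding with a mask. A minimal
user-space sketch of that rounding, with round_to_chunks() as a
hypothetical name and plain division standing in for the kernel's
sector_div():

    #include <stdint.h>
    #include <stdio.h>

    /* Truncate a device's capacity to a whole number of chunks. */
    static uint64_t round_to_chunks(uint64_t dev_sectors,
                                    uint32_t chunk_sectors)
    {
            return dev_sectors / chunk_sectors * chunk_sectors;
    }

    int main(void)
    {
            /* 1000 sectors with 128-sector (64 KiB) chunks -> 896 */
            printf("%llu\n",
                   (unsigned long long)round_to_chunks(1000, 128));
            return 0;
    }

The mask variant at 363, ~(sector_t)(mddev->chunk_sectors-1), is
equivalent only when the chunk size is a power of two.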
|
D | md-linear.c |
      87  if (mddev->chunk_sectors) {  in linear_conf()
      89  sector_div(sectors, mddev->chunk_sectors);  in linear_conf()
      90  rdev->sectors = sectors * mddev->chunk_sectors;  in linear_conf()
     272  seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);  in linear_status()
|
D | dm-zoned-target.c |
     994  unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);  in dmz_io_hints() local
    1004  limits->max_discard_sectors = chunk_sectors;  in dmz_io_hints()
    1005  limits->max_hw_discard_sectors = chunk_sectors;  in dmz_io_hints()
    1006  limits->max_write_zeroes_sectors = chunk_sectors;  in dmz_io_hints()
    1009  limits->chunk_sectors = chunk_sectors;  in dmz_io_hints()
    1010  limits->max_sectors = chunk_sectors;  in dmz_io_hints()
|
D | raid5.c |
     924  if (!sector_div(tmp_sec, conf->chunk_sectors))  in stripe_add_to_batch_list()
    3013  : conf->chunk_sectors;  in raid5_compute_sector()
    3209  : conf->chunk_sectors;  in raid5_compute_blocknr()
    3540  if (first + conf->chunk_sectors * (count - 1) != last)  in stripe_bio_overlaps()
    3647  previous ? conf->prev_chunk_sectors : conf->chunk_sectors;  in stripe_set_idx()
    5424  unsigned int chunk_sectors;  in in_chunk_boundary() local
    5427  chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);  in in_chunk_boundary()
    5428  return chunk_sectors >=  in in_chunk_boundary()
    5429  ((sector & (chunk_sectors - 1)) + bio_sectors);  in in_chunk_boundary()
    5589  unsigned chunk_sects = mddev->chunk_sectors;  in chunk_aligned_read()
     [all …]
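
The in_chunk_boundary() hits above (5427-5429) test whether a read stays
inside a single chunk. A sketch of that test, assuming chunk_sectors is
a power of two as the mask arithmetic requires (fits_in_one_chunk() is a
hypothetical name):

    #include <stdbool.h>
    #include <stdint.h>

    /* True iff bio_sectors starting at 'sector' do not cross a chunk
     * boundary: the offset within the chunk plus the I/O length must
     * not exceed the chunk size. */
    static bool fits_in_one_chunk(uint64_t sector, uint32_t bio_sectors,
                                  uint32_t chunk_sectors)
    {
            uint32_t offset = sector & (chunk_sectors - 1);

            return chunk_sectors >= offset + bio_sectors;
    }

The kernel first takes the smaller of the current and previous chunk
size (hit 5427), which keeps the test safe while a reshape is changing
the layout.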
|
D | dm-raid.c |
     717  mddev->new_chunk_sectors = mddev->chunk_sectors;  in rs_set_cur()
     730  mddev->chunk_sectors = mddev->new_chunk_sectors;  in rs_set_new()
     979  if (region_size < rs->md.chunk_sectors) {  in validate_region_size()
    1160  rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;  in parse_raid_params()
    1490  if (rs->md.chunk_sectors)  in parse_raid_params()
    1491  max_io_len = rs->md.chunk_sectors;  in parse_raid_params()
    1540  uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;  in rs_set_raid456_stripe_cache()
    1880  rs->md.new_chunk_sectors != rs->md.chunk_sectors;  in rs_is_layout_change()
    2122  sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);  in super_sync()
    2233  mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);  in super_init_validation()
     [all …]
|
D | raid5-ppl.c |
     329  (data_sector >> ilog2(conf->chunk_sectors) ==  in ppl_log_stripe()
     330  data_sector_last >> ilog2(conf->chunk_sectors)) &&  in ppl_log_stripe()
     814  if ((pp_size >> 9) < conf->chunk_sectors) {  in ppl_recover_entry()
     823  (data_disks - 1) * conf->chunk_sectors +  in ppl_recover_entry()
     827  strip_sectors = conf->chunk_sectors;  in ppl_recover_entry()
     861  (disk * conf->chunk_sectors);  in ppl_recover_entry()
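
Hits 329-330 above compare chunk numbers by shifting out
ilog2(chunk_sectors) bits. A sketch of that same-chunk test, assuming a
power-of-two chunk size (same_chunk() is a hypothetical name;
__builtin_ctz() stands in for the kernel's ilog2() on a power of two):

    #include <stdbool.h>
    #include <stdint.h>

    /* Two sectors lie in the same chunk iff their chunk numbers,
     * obtained by shifting out the in-chunk offset bits, match. */
    static bool same_chunk(uint64_t a, uint64_t b, uint32_t chunk_sectors)
    {
            unsigned int shift = __builtin_ctz(chunk_sectors);

            return (a >> shift) == (b >> shift);
    }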
|
D | dm-unstripe.c |
     180  limits->chunk_sectors = uc->chunk_size;  in unstripe_io_hints()
|
D | raid5.h |
     579  int chunk_sectors;  member
|
D | raid10.c |
    1832  dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;  in raid10_handle_discard()
    1834  dev_start = first_stripe_index * mddev->chunk_sectors;  in raid10_handle_discard()
    1839  dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;  in raid10_handle_discard()
    1841  dev_end = last_stripe_index * mddev->chunk_sectors;  in raid10_handle_discard()
    1943  seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);  in raid10_status()
    3250  window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;  in raid10_set_cluster_sync_high()
    3987  chunk = mddev->chunk_sectors;  in setup_geo()
    4143  blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *  in raid10_set_io_opt()
    4187  blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);  in raid10_run()
    4406  mddev->new_chunk_sectors = mddev->chunk_sectors;  in raid10_takeover_raid0()
     [all …]
|
D | md.c |
    1287  mddev->chunk_sectors = sb->chunk_size >> 9;  in super_90_validate()
    1316  mddev->new_chunk_sectors = mddev->chunk_sectors;  in super_90_validate()
    1466  sb->chunk_size = mddev->chunk_sectors << 9;  in super_90_sync()
    1797  mddev->chunk_sectors = le32_to_cpu(sb->chunksize);  in super_1_validate()
    1855  mddev->new_chunk_sectors = mddev->chunk_sectors;  in super_1_validate()
    2014  sb->chunksize = cpu_to_le32(mddev->chunk_sectors);  in super_1_sync()
    2636  (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))  in does_sb_need_changing()
    4007  mddev->new_chunk_sectors = mddev->chunk_sectors;  in level_store()
    4030  mddev->chunk_sectors = mddev->new_chunk_sectors;  in level_store()
    4232  mddev->chunk_sectors != mddev->new_chunk_sectors)  in chunk_size_show()
     [all …]
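
The superblock hits show the on-disk formats storing the chunk size in
bytes while mddev->chunk_sectors counts 512-byte sectors, hence the
shifts by 9 in both directions (1287 and 1466). As a sketch:

    #include <stdint.h>

    /* 1 sector = 512 bytes = 1 << 9 bytes. */
    static inline uint32_t sectors_to_bytes(uint32_t sectors)
    {
            return sectors << 9;
    }

    static inline uint32_t bytes_to_sectors(uint32_t bytes)
    {
            return bytes >> 9;
    }

The same relationship explains the chunk_sectors / 2 in linear_status()
and raid10_status() above: halving the sector count yields KiB.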
|
D | md.h |
     340  int chunk_sectors;  member
|
D | dm-table.c |
    1699  zone_sectors = ti_limits.chunk_sectors;  in dm_calculate_queue_limits()
    1740  zone_sectors = limits->chunk_sectors;  in dm_calculate_queue_limits()
|
D | raid5-cache.c |
     198  sector_div(sect, conf->chunk_sectors);  in r5c_tree_index()
     365  conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))  in r5c_check_cached_full_stripe()
|
/linux-6.6.21/block/ |
D | blk-settings.c |
      45  lim->chunk_sectors = 0;  in blk_set_default_limits()
     167  void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)  in blk_queue_chunk_sectors() argument
     169  q->limits.chunk_sectors = chunk_sectors;  in blk_queue_chunk_sectors()
     225  max_sectors = min(q->limits.chunk_sectors, max_sectors);  in blk_queue_max_zone_append_sectors()
     612  if (b->chunk_sectors)  in blk_stack_limits()
     613  t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);  in blk_stack_limits()
     637  if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {  in blk_stack_limits()
     638  t->chunk_sectors = 0;  in blk_stack_limits()
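
Hits 612-613 show how blk_stack_limits() merges chunk_sectors when a
chunked device is stacked on another: the combined chunk must divide
both, so the gcd is taken, with 0 meaning "no chunk limit". A sketch,
where stack_chunk_sectors() is a hypothetical name and a local Euclid
loop stands in for the kernel's gcd():

    #include <stdint.h>

    static uint32_t gcd32(uint32_t a, uint32_t b)
    {
            while (b) {
                    uint32_t t = a % b;

                    a = b;
                    b = t;
            }
            return a;
    }

    /* Combine top and bottom chunk limits; 0 means no chunk limit. */
    static uint32_t stack_chunk_sectors(uint32_t top, uint32_t bottom)
    {
            if (!bottom)
                    return top;
            return top ? gcd32(top, bottom) : bottom;
    }

Hits 637-638 then drop the limit entirely if the resulting chunk size in
bytes is not aligned to the physical block size.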
|
D | blk-zoned.c |
     458  sector_t zone_sectors = q->limits.chunk_sectors;  in blk_revalidate_zone_cb()
     539  sector_t zone_sectors = q->limits.chunk_sectors;  in blk_revalidate_disk_zones()
     631  q->limits.chunk_sectors = 0;  in disk_clear_zone_settings()
|
D | blk-merge.c |
     176  if (lim->chunk_sectors) {  in get_max_io_size()
     179  lim->chunk_sectors));  in get_max_io_size()
     600  if (!q->limits.chunk_sectors ||  in blk_rq_get_max_sectors()
     605  blk_chunk_sectors_left(offset, q->limits.chunk_sectors));  in blk_rq_get_max_sectors()
|
D | blk.h |
     316  return lim->chunk_sectors || bio->bi_vcnt != 1 ||  in bio_may_exceed_limits()
|
D | blk-core.c |
     589  if (nr_sectors > q->limits.chunk_sectors)  in blk_check_zone_append()
|
D | blk-sysfs.c |
     143  return queue_var_show(q->limits.chunk_sectors, page);  in queue_chunk_sectors_show()
|
D | mq-deadline.c |
     179  pos = round_down(pos, rq->q->limits.chunk_sectors);  in deadline_from_pos()
|
/linux-6.6.21/drivers/char/ |
D | ps3flash.c |
      26  u64 chunk_sectors;  member
      38  start_sector, priv->chunk_sectors,  in ps3flash_read_write_sectors()
     118  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_read()
     151  sector += priv->chunk_sectors;  in ps3flash_read()
     187  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_write()
     226  sector += priv->chunk_sectors;  in ps3flash_write()
     376  priv->chunk_sectors = dev->bounce_size / dev->blk_size;  in ps3flash_probe()
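
Here chunk_sectors is set up in ps3flash_probe() (hit 376) as
bounce_size / blk_size, and hits 118 and 187 use it to map a file
position to a starting sector. A sketch of that mapping (pos_to_sector()
is a hypothetical name):

    #include <stdint.h>

    /* Round pos down to a whole bounce buffer, then scale to sectors,
     * as ps3flash_read() and ps3flash_write() do. */
    static uint64_t pos_to_sector(uint64_t pos, uint64_t bounce_size,
                                  uint64_t chunk_sectors)
    {
            return pos / bounce_size * chunk_sectors;
    }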
|
/linux-6.6.21/include/linux/ |
D | blkdev.h |
     294  unsigned int chunk_sectors;  member
     655  return sector >> ilog2(disk->queue->limits.chunk_sectors);  in disk_zone_no()
     880  unsigned int chunk_sectors)  in blk_chunk_sectors_left() argument
     882  if (unlikely(!is_power_of_2(chunk_sectors)))  in blk_chunk_sectors_left()
     883  return chunk_sectors - sector_div(offset, chunk_sectors);  in blk_chunk_sectors_left()
     884  return chunk_sectors - (offset & (chunk_sectors - 1));  in blk_chunk_sectors_left()
    1295  return q->limits.chunk_sectors;  in bdev_zone_sectors()
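
Hits 880-884 contain the whole of blk_chunk_sectors_left(): how many
sectors remain between an offset and the end of its chunk, with a mask
fast path when the chunk size is a power of two. A user-space rendering
(chunk_sectors_left() is a hypothetical name; % stands in for the
kernel's sector_div()):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t chunk_sectors_left(uint64_t offset,
                                       uint32_t chunk_sectors)
    {
            /* non-power-of-two chunk: fall back to a division */
            if (chunk_sectors & (chunk_sectors - 1))
                    return chunk_sectors - (uint32_t)(offset % chunk_sectors);
            /* power of two: mask out the in-chunk offset */
            return chunk_sectors - (uint32_t)(offset & (chunk_sectors - 1));
    }

    int main(void)
    {
            /* offset 300 in 256-sector chunks -> 212 sectors left */
            printf("%u\n", chunk_sectors_left(300, 256));
            return 0;
    }

blk-merge.c above (hits 600 and 605) uses this helper to clamp a request
so it never crosses a chunk or zone boundary.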
|
/linux-6.6.21/include/uapi/linux/ |
D | ublk_cmd.h |
     343  __u32 chunk_sectors;  member
|
/linux-6.6.21/Documentation/ABI/stable/ |
D | sysfs-block |
     145  What: /sys/block/<disk>/queue/chunk_sectors
     149  [RO] chunk_sectors has different meaning depending on the type
     150  of the disk. For a RAID device (dm-raid), chunk_sectors
     153  host-managed, chunk_sectors indicates the size in 512B sectors
|
/linux-6.6.21/drivers/block/ |
D | ublk_drv.c |
     223  return p->dev_sectors >> ilog2(p->chunk_sectors);  in ublk_get_nr_zones()
     302  unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;  in ublk_report_zones()
     517  blk_queue_chunk_sectors(q, p->chunk_sectors);  in ublk_dev_param_basic_apply()
     554  if (ublk_dev_is_zoned(ub) && !p->chunk_sectors)  in ublk_validate_params()
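
For zoned ublk devices chunk_sectors is the zone size, and hit 223
computes the zone count with a shift by ilog2(chunk_sectors). A sketch
(nr_zones() is a hypothetical name; __builtin_ctz() stands in for the
kernel's ilog2() on a power of two):

    #include <stdint.h>

    /* dev_sectors >> log2(zone size) = number of zones. */
    static uint32_t nr_zones(uint64_t dev_sectors, uint32_t chunk_sectors)
    {
            return (uint32_t)(dev_sectors >> __builtin_ctz(chunk_sectors));
    }

ublk_validate_params() (hit 554) checks that a zoned device has a
nonzero chunk_sectors, which keeps the shift well defined.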
|