Searched refs:max_hw_sectors (Results 1 – 19 of 19) sorted by relevance
/linux-6.1.9/block/
D | blk-settings.c |
      42  lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;  in blk_set_default_limits()
      78  lim->max_hw_sectors = UINT_MAX;  in blk_set_stacking_limits()
     122  void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)  in blk_queue_max_hw_sectors() argument
     127  if ((max_hw_sectors << 9) < PAGE_SIZE) {  in blk_queue_max_hw_sectors()
     128  max_hw_sectors = 1 << (PAGE_SHIFT - 9);  in blk_queue_max_hw_sectors()
     130  __func__, max_hw_sectors);  in blk_queue_max_hw_sectors()
     133  max_hw_sectors = round_down(max_hw_sectors,  in blk_queue_max_hw_sectors()
     135  limits->max_hw_sectors = max_hw_sectors;  in blk_queue_max_hw_sectors()
     137  max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);  in blk_queue_max_hw_sectors()
     218  max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);  in blk_queue_max_zone_append_sectors()
     [all …]
|
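The blk-settings.c hits above are the canonical setter: blk_queue_max_hw_sectors() clamps the driver-supplied value to at least one page worth of sectors, rounds it down to the logical block size, and then derives the default max_sectors from it. A minimal sketch of how a driver might call it; the mydev_* name and the max_kb parameter are illustrative, not taken from these results:

#include <linux/blkdev.h>

/* Illustrative only: cap a queue at max_kb KiB per request. */
static void mydev_set_transfer_limit(struct request_queue *q, unsigned int max_kb)
{
	/*
	 * blk_queue_max_hw_sectors() (blk-settings.c:122 above) stores the
	 * value in q->limits.max_hw_sectors after enforcing the PAGE_SIZE
	 * minimum and rounding down to the logical block size.
	 */
	blk_queue_max_hw_sectors(q, max_kb << 1);	/* 1 KiB == two 512 B sectors */
}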
D | blk-merge.c | 593 return q->limits.max_hw_sectors; in blk_rq_get_max_sectors()
|
/linux-6.1.9/drivers/nvme/target/ |
D | passthru.c |
      86  unsigned int max_hw_sectors;  in nvmet_passthru_override_id_ctrl() local
     105  max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),  in nvmet_passthru_override_id_ctrl()
     106  pctrl->max_hw_sectors);  in nvmet_passthru_override_id_ctrl()
     112  max_hw_sectors = min_not_zero(BIO_MAX_VECS << (PAGE_SHIFT - 9),  in nvmet_passthru_override_id_ctrl()
     113  max_hw_sectors);  in nvmet_passthru_override_id_ctrl()
     117  id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;  in nvmet_passthru_override_id_ctrl()
|
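The passthru.c hits show the NVMe target translating a block-layer sector limit back into the NVMe MDTS field, which is a power-of-two multiple of the controller page size. A small sketch of the same arithmetic as passthru.c:117, assuming 512 B sectors and a 4 KiB controller page (page_shift = 12); the helper name is hypothetical:

#include <linux/log2.h>

/* Sketch of the MDTS encoding at passthru.c:117 above. */
static u8 mdts_from_sectors(unsigned int max_hw_sectors, unsigned int page_shift)
{
	/* e.g. 2048 sectors (1 MiB) with 4 KiB pages: 11 + 9 - 12 = 8, i.e. 2^8 * 4 KiB = 1 MiB */
	return ilog2(max_hw_sectors) + 9 - page_shift;
}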
D | loop.c | 375 ctrl->ctrl.max_hw_sectors = in nvme_loop_configure_admin_queue()
|
/linux-6.1.9/drivers/block/rnbd/ |
D | rnbd-proto.h | 138 __le32 max_hw_sectors; member
|
D | README | 73 information: side, max_hw_sectors, etc.
|
D | rnbd-srv.c | 547 rsp->max_hw_sectors = in rnbd_srv_fill_msg_open_rsp()
|
/linux-6.1.9/drivers/nvme/host/ |
D | core.c |
    1848  if (ctrl->max_hw_sectors) {  in nvme_set_queue_limits()
    1850  (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;  in nvme_set_queue_limits()
    1853  blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);  in nvme_set_queue_limits()
    1948  is_power_of_2(ctrl->max_hw_sectors))  in nvme_set_chunk_sectors()
    1949  iob = ctrl->max_hw_sectors;  in nvme_set_chunk_sectors()
    3063  ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;  in nvme_init_non_mdts_limits()
    3096  u32 max_hw_sectors;  in nvme_init_identify() local
    3157  max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);  in nvme_init_identify()
    3159  max_hw_sectors = UINT_MAX;  in nvme_init_identify()
    3160  ctrl->max_hw_sectors =  in nvme_init_identify()
    [all …]
|
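The core.c hits go the other direction: nvme_init_identify() converts the controller's advertised MDTS into ctrl->max_hw_sectors (falling back to UINT_MAX when MDTS is zero, i.e. no limit), and nvme_set_queue_limits() later passes that value to blk_queue_max_hw_sectors(). A rough sketch of the mdts-to-sectors mapping, with the overflow handling of the real helper omitted; the function name is hypothetical:

#include <linux/types.h>
#include <linux/kernel.h>

/* Sketch of the mdts -> 512 B sectors mapping used around core.c:3157 above. */
static u32 mdts_to_sectors(u8 mdts, unsigned int page_shift)
{
	if (!mdts)
		return UINT_MAX;	/* MDTS == 0 means no transfer size limit */
	return 1U << (mdts + page_shift - 9);
}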
D | zns.c | 44 ctrl->max_zone_append = ctrl->max_hw_sectors; in nvme_set_max_append()
|
D | nvme.h | 282 u32 max_hw_sectors; member
|
D | multipath.c | 877 size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT; in nvme_mpath_init_identify()
|
D | apple.c | 1038 anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1, in apple_nvme_reset_work()
|
D | rdma.c | 867 ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9); in nvme_rdma_configure_admin_queue()
|
D | pci.c | 2851 dev->ctrl.max_hw_sectors = min_t(u32, in nvme_reset_work()
|
D | fc.c | 3104 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments << in nvme_fc_create_association()
|
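The fabrics hits (rdma.c:867, fc.c:3104) size the limit from a segment budget rather than from MDTS: with 4 KiB pages, ilog2(SZ_4K) - 9 == 3, so the shift turns a count of 4 KiB pages into 512 B sectors (a factor of 8). A hedged sketch of that conversion; the helper name is made up:

#include <linux/log2.h>
#include <linux/sizes.h>

/* Sketch of the page-count -> sector conversion at rdma.c:867 / fc.c:3104 above. */
static u32 segments_to_hw_sectors(u32 nr_4k_pages)
{
	return nr_4k_pages << (ilog2(SZ_4K) - 9);	/* == nr_4k_pages * 8 */
}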
/linux-6.1.9/include/linux/ |
D | blkdev.h |
     287  unsigned int max_hw_sectors;  member
    1138  return q->limits.max_hw_sectors;  in queue_max_hw_sectors()
|
/linux-6.1.9/drivers/block/drbd/ |
D | drbd_nl.c |
    1263  unsigned int max_hw_sectors = max_bio_size >> 9;  in drbd_setup_queue_param() local
    1271  max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);  in drbd_setup_queue_param()
    1280  blk_queue_max_hw_sectors(q, max_hw_sectors);  in drbd_setup_queue_param()
|
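The drbd_nl.c hits show the stacking pattern: a stacked device should not advertise more than its backing queue accepts, so it combines its own byte-sized cap with queue_max_hw_sectors() of the lower queue (the accessor from blkdev.h:1138 above). A rough sketch of that pattern with hypothetical names:

#include <linux/blkdev.h>
#include <linux/minmax.h>

/* Illustrative stacking helper: q is the stacked queue, b the backing queue (may be NULL). */
static void stacked_setup_queue(struct request_queue *q, struct request_queue *b,
				unsigned int max_bio_size)
{
	unsigned int max_hw_sectors = max_bio_size >> 9;	/* bytes -> 512 B sectors */

	if (b)
		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);

	blk_queue_max_hw_sectors(q, max_hw_sectors);
}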
/linux-6.1.9/drivers/md/bcache/ |
D | super.c | 950 q->limits.max_hw_sectors = UINT_MAX; in bcache_device_init()
|
/linux-6.1.9/drivers/scsi/ |
D | sd.c | 3315 q->limits.max_sectors > q->limits.max_hw_sectors) in sd_revalidate_disk()
|