Searched refs: max_hw_sectors  (Results 1 – 19 of 19)  sorted by relevance
/linux-6.6.21/block/
blk-settings.c
    43  lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;    in blk_set_default_limits()
    79  lim->max_hw_sectors = UINT_MAX;    in blk_set_stacking_limits()
   123  void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)    in blk_queue_max_hw_sectors() argument
   128  if ((max_hw_sectors << 9) < PAGE_SIZE) {    in blk_queue_max_hw_sectors()
   129  max_hw_sectors = 1 << (PAGE_SHIFT - 9);    in blk_queue_max_hw_sectors()
   131  __func__, max_hw_sectors);    in blk_queue_max_hw_sectors()
   134  max_hw_sectors = round_down(max_hw_sectors,    in blk_queue_max_hw_sectors()
   136  limits->max_hw_sectors = max_hw_sectors;    in blk_queue_max_hw_sectors()
   138  max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);    in blk_queue_max_hw_sectors()
   224  max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);    in blk_queue_max_zone_append_sectors()
  [all …]
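The blk-settings.c hits above contain the clamping the block layer applies when a driver declares its hardware limit: blk_queue_max_hw_sectors() forces the value up to at least one page worth of sectors, rounds it down to a multiple of the logical block size, and starts the derivation of the soft max_sectors limit from it. The user-space sketch below mirrors only the arithmetic visible in those hits; the page size, logical block size and starting values are assumptions for the example, and whatever further capping the real function applies beyond these lines is not reproduced.

    /* Illustration of the clamping seen in blk_queue_max_hw_sectors()
     * (source lines 128, 129, 134 and 138 above); not kernel code. */
    #include <stdio.h>

    static const unsigned int sector_shift = 9;
    static const unsigned int page_size = 4096;      /* assumed 4 KiB pages */

    static unsigned int min_not_zero(unsigned int a, unsigned int b)
    {
        if (!a)
            return b;
        return !b ? a : (a < b ? a : b);
    }

    int main(void)
    {
        unsigned int logical_block_size = 4096;  /* assumed 4 KiB LBA format */
        unsigned int max_dev_sectors = 0;        /* 0: no separate device limit */
        unsigned int max_hw_sectors = 500;       /* example driver value; not a
                                                    multiple of 8 sectors, so the
                                                    round-down below is visible */
        unsigned int max_sectors;

        /* Lines 128-129: never allow less than one page per request. */
        if ((max_hw_sectors << sector_shift) < page_size)
            max_hw_sectors = page_size >> sector_shift;

        /* Line 134: keep the limit a multiple of the logical block size. */
        max_hw_sectors -= max_hw_sectors % (logical_block_size >> sector_shift);

        /* Line 138: the soft limit starts from the smaller non-zero value. */
        max_sectors = min_not_zero(max_hw_sectors, max_dev_sectors);

        printf("max_hw_sectors=%u max_sectors=%u\n", max_hw_sectors, max_sectors);
        return 0;
    }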
blk-merge.c
   597  return q->limits.max_hw_sectors;    in blk_rq_get_max_sectors()
/linux-6.6.21/drivers/nvme/target/ |
passthru.c
    86  unsigned int max_hw_sectors;    in nvmet_passthru_override_id_ctrl() local
   105  max_hw_sectors = min_not_zero(pctrl->max_segments << PAGE_SECTORS_SHIFT,    in nvmet_passthru_override_id_ctrl()
   106  pctrl->max_hw_sectors);    in nvmet_passthru_override_id_ctrl()
   112  max_hw_sectors = min_not_zero(BIO_MAX_VECS << PAGE_SECTORS_SHIFT,    in nvmet_passthru_override_id_ctrl()
   113  max_hw_sectors);    in nvmet_passthru_override_id_ctrl()
   117  id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;    in nvmet_passthru_override_id_ctrl()
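The last hit above shows how the passthru target turns its computed sector limit into an NVMe MDTS value: MDTS is a power-of-two exponent expressed in units of the controller page size, so the encoding is ilog2(max_hw_sectors) + 9 - page_shift. A stand-alone sketch of that encoding, with a 4 KiB controller page (page_shift = 12) and an example limit assumed:

    /* Sketch of the MDTS encoding at line 117 above; the page shift and the
     * sector limit are example values, not taken from a real controller. */
    #include <stdio.h>

    static unsigned int ilog2_u32(unsigned int v)
    {
        unsigned int r = 0;

        while (v > 1) {
            v >>= 1;
            r++;
        }
        return r;
    }

    int main(void)
    {
        unsigned int page_shift = 12;        /* assumed controller page: 4 KiB */
        unsigned int max_hw_sectors = 2048;  /* 1 MiB in 512-byte sectors */

        unsigned int mdts = ilog2_u32(max_hw_sectors) + 9 - page_shift;

        /* 2048 sectors = 1 MiB = 256 controller pages = 2^8, so mdts == 8. */
        printf("mdts=%u (max transfer %u KiB)\n",
               mdts, (1u << (mdts + page_shift)) / 1024);
        return 0;
    }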
loop.c
   375  ctrl->ctrl.max_hw_sectors =    in nvme_loop_configure_admin_queue()
/linux-6.6.21/drivers/block/rnbd/ |
rnbd-proto.h
   147  __le32 max_hw_sectors;    member
README
    73  information: side, max_hw_sectors, etc.
rnbd-srv.c
   545  rsp->max_hw_sectors =    in rnbd_srv_fill_msg_open_rsp()
/linux-6.6.21/drivers/nvme/host/ |
core.c
  1876  if (ctrl->max_hw_sectors) {    in nvme_set_queue_limits()
  1878  (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;    in nvme_set_queue_limits()
  1881  blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);    in nvme_set_queue_limits()
  1977  is_power_of_2(ctrl->max_hw_sectors))    in nvme_set_chunk_sectors()
  1978  iob = ctrl->max_hw_sectors;    in nvme_set_chunk_sectors()
  2931  ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;    in nvme_init_non_mdts_limits()
  3024  u32 max_hw_sectors;    in nvme_init_identify() local
  3083  max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);    in nvme_init_identify()
  3085  max_hw_sectors = UINT_MAX;    in nvme_init_identify()
  3086  ctrl->max_hw_sectors =    in nvme_init_identify()
  [all …]
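On the host side, core.c goes the other way: nvme_init_identify() turns the controller's reported MDTS into max_hw_sectors (line 3083), and nvme_set_queue_limits() derives a segment count from it (line 1878) before handing the limit to the block layer with blk_queue_max_hw_sectors() (line 1881). The sketch below only reproduces the shift and division visible in those hits; the controller page size is taken to be 4 KiB, and the clamping inside the real nvme_mps_to_sectors() is omitted.

    /* Illustration of the arithmetic in the core.c hits above: MDTS back to a
     * sector count, then a segment count from it (line 1878); example values. */
    #include <stdio.h>

    #define CTRL_PAGE_SHIFT  12                      /* assumed 4 KiB controller page */
    #define CTRL_PAGE_SIZE   (1u << CTRL_PAGE_SHIFT)

    int main(void)
    {
        unsigned int mdts = 5;   /* example value a controller might report */

        /* MDTS is a power-of-two multiple of the controller page size. */
        unsigned int max_hw_sectors = 1u << (mdts + CTRL_PAGE_SHIFT - 9);

        /* Line 1878: one segment per controller page, plus one so a transfer
         * that does not start on a page boundary still fits. */
        unsigned int max_segments =
            (max_hw_sectors / (CTRL_PAGE_SIZE >> 9)) + 1;

        printf("max_hw_sectors=%u (%u KiB), max_segments=%u\n",
               max_hw_sectors, max_hw_sectors / 2, max_segments);
        return 0;
    }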
zns.c
    43  ctrl->max_zone_append = ctrl->max_hw_sectors;    in nvme_set_max_append()
nvme.h
   302  u32 max_hw_sectors;    member
multipath.c
   910  size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;    in nvme_mpath_init_identify()
apple.c
  1052  anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1,    in apple_nvme_reset_work()
rdma.c
   830  ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);    in nvme_rdma_configure_admin_queue()
fc.c
  3115  ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<    in nvme_fc_create_association()
pci.c
  2975  dev->ctrl.max_hw_sectors = min_t(u32,    in nvme_pci_alloc_dev()
/linux-6.6.21/include/linux/ |
blkdev.h
   292  unsigned int max_hw_sectors;    member
  1107  return q->limits.max_hw_sectors;    in queue_max_hw_sectors()
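queue_max_hw_sectors() is the in-kernel accessor for the limit stored in struct queue_limits; the block layer also exposes the same value read-only to user space as queue/max_hw_sectors_kb in sysfs (next to the tunable soft limit max_sectors_kb). A minimal reader, with "sda" as a placeholder device name:

    /* Read the hardware transfer limit for one block device from sysfs. */
    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/block/sda/queue/max_hw_sectors_kb";
        unsigned long kb;
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return 1;
        }
        if (fscanf(f, "%lu", &kb) != 1) {
            fprintf(stderr, "unexpected contents in %s\n", path);
            fclose(f);
            return 1;
        }
        fclose(f);

        /* 1 KiB = 2 sectors of 512 bytes. */
        printf("hardware limit: %lu KiB per request (%lu sectors)\n", kb, kb * 2);
        return 0;
    }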
/linux-6.6.21/drivers/block/drbd/ |
drbd_nl.c
  1275  unsigned int max_hw_sectors = max_bio_size >> 9;    in drbd_setup_queue_param() local
  1283  max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);    in drbd_setup_queue_param()
  1292  blk_queue_max_hw_sectors(q, max_hw_sectors);    in drbd_setup_queue_param()
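The drbd hits show the usual pattern for stacked block drivers: read the backing device's queue_max_hw_sectors(), cap your own limit with it, and publish the result through blk_queue_max_hw_sectors(). A kernel-side sketch of that pattern under assumed names (my_setup_queue_limits and backing_bdev are placeholders, not drbd's own identifiers):

    /* Sketch of the stacking pattern in drbd_setup_queue_param() above:
     * never advertise more than the lower device's queue can accept. */
    #include <linux/blkdev.h>
    #include <linux/minmax.h>

    static void my_setup_queue_limits(struct request_queue *my_queue,
                                      struct block_device *backing_bdev,
                                      unsigned int max_bio_size)
    {
            unsigned int max_hw_sectors = max_bio_size >> 9;

            if (backing_bdev) {
                    struct request_queue *b = bdev_get_queue(backing_bdev);

                    /* Cap our limit by whatever the backing queue advertises. */
                    max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
            }

            blk_queue_max_hw_sectors(my_queue, max_hw_sectors);
    }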
/linux-6.6.21/drivers/md/bcache/ |
super.c
   952  q->limits.max_hw_sectors = UINT_MAX;    in bcache_device_init()
/linux-6.6.21/drivers/scsi/ |
sd.c
  3542  q->limits.max_sectors > q->limits.max_hw_sectors)    in sd_revalidate_disk()