/linux-6.1.9/block/ |
D | blk-mq.c |
      605  blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)  in blk_mq_alloc_request_hctx() argument
      632  if (hctx_idx >= q->nr_hw_queues)  in blk_mq_alloc_request_hctx()
      644  data.hctx = xa_load(&q->hctx_table, hctx_idx);  in blk_mq_alloc_request_hctx()
     3190  unsigned int hctx_idx)  in blk_mq_free_rqs() argument
     3201  drv_tags = set->tags[hctx_idx];  in blk_mq_free_rqs()
     3211  set->ops->exit_request(set, rq, hctx_idx);  in blk_mq_free_rqs()
     3241  unsigned int hctx_idx)  in hctx_idx_to_type() argument
     3249  if (hctx_idx >= start && hctx_idx < end)  in hctx_idx_to_type()
     3260  unsigned int hctx_idx)  in blk_mq_get_hctx_node() argument
     3262  enum hctx_type type = hctx_idx_to_type(set, hctx_idx);  in blk_mq_get_hctx_node()
     [all …]
|
D | blk-mq.h |
       52  unsigned int hctx_idx);
       55  unsigned int hctx_idx, unsigned int depth);
       58  unsigned int hctx_idx);
|
D | bsg-lib.c |
      302  unsigned int hctx_idx, unsigned int numa_node)  in bsg_init_rq() argument
      313  unsigned int hctx_idx)  in bsg_exit_rq() argument
|
D | blk-mq-sched.c |
      500  unsigned int hctx_idx)  in blk_mq_sched_alloc_map_and_rqs() argument
      507  hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,  in blk_mq_sched_alloc_map_and_rqs()
|
D | kyber-iosched.c |
      466  static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in kyber_init_hctx() argument
      521  static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in kyber_exit_hctx() argument
|
D | mq-deadline.c | 632 static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in dd_init_hctx() argument
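Note: every block-layer hit above lands on the hctx_idx parameter of a blk_mq_ops or elevator callback, i.e. the index of the hardware queue the callback is invoked for. A minimal sketch of how a driver wires such callbacks up is given below; the my_* identifiers are hypothetical and only the callback signatures follow struct blk_mq_ops as declared in include/linux/blk-mq.h in this tree.

    #include <linux/blk-mq.h>

    /* Sketch only: hctx_idx names the hardware queue; .queue_rq and the
     * other mandatory callbacks are omitted for brevity. */
    static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
                            unsigned int hctx_idx)
    {
            /* Typical use: look up a per-hardware-queue structure by index,
             * e.g. hctx->driver_data = &my_dev->queues[hctx_idx]; (hypothetical) */
            return 0;
    }

    static int my_init_request(struct blk_mq_tag_set *set, struct request *rq,
                               unsigned int hctx_idx, unsigned int numa_node)
    {
            /* Called once for each pre-allocated request of hardware queue
             * hctx_idx, on the given NUMA node. */
            return 0;
    }

    static const struct blk_mq_ops my_mq_ops = {
            .init_hctx    = my_init_hctx,
            .init_request = my_init_request,
    };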
|
/linux-6.1.9/drivers/nvme/target/ |
D | loop.c |
      204  struct request *req, unsigned int hctx_idx,  in nvme_loop_init_request() argument
      213  (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);  in nvme_loop_init_request()
      219  unsigned int hctx_idx)  in nvme_loop_init_hctx() argument
      222  struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];  in nvme_loop_init_hctx()
      224  BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);  in nvme_loop_init_hctx()
      239  unsigned int hctx_idx)  in nvme_loop_init_admin_hctx() argument
      244  BUG_ON(hctx_idx != 0);  in nvme_loop_init_admin_hctx()
|
/linux-6.1.9/drivers/mmc/core/ |
D | queue.c |
      204  unsigned int hctx_idx, unsigned int numa_node)  in mmc_mq_init_request() argument
      219  unsigned int hctx_idx)  in mmc_mq_exit_request() argument
|
/linux-6.1.9/drivers/nvme/host/ |
D | rdma.c |
      287  struct request *rq, unsigned int hctx_idx)  in nvme_rdma_exit_request() argument
      295  struct request *rq, unsigned int hctx_idx,  in nvme_rdma_init_request() argument
      300  int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;  in nvme_rdma_init_request()
      321  unsigned int hctx_idx)  in nvme_rdma_init_hctx() argument
      324  struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];  in nvme_rdma_init_hctx()
      326  BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);  in nvme_rdma_init_hctx()
      333  unsigned int hctx_idx)  in nvme_rdma_init_admin_hctx() argument
      338  BUG_ON(hctx_idx != 0);  in nvme_rdma_init_admin_hctx()
|
D | tcp.c |
      454  struct request *rq, unsigned int hctx_idx)  in nvme_tcp_exit_request() argument
      462  struct request *rq, unsigned int hctx_idx,  in nvme_tcp_init_request() argument
      468  int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;  in nvme_tcp_init_request()
      487  unsigned int hctx_idx)  in nvme_tcp_init_hctx() argument
      490  struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];  in nvme_tcp_init_hctx()
      497  unsigned int hctx_idx)  in nvme_tcp_init_admin_hctx() argument
|
D | fc.c |
     1828  unsigned int hctx_idx)  in nvme_fc_exit_request() argument
     2136  unsigned int hctx_idx, unsigned int numa_node)  in nvme_fc_init_request() argument
     2140  int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;  in nvme_fc_init_request()
     2221  nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)  in nvme_fc_init_hctx() argument
     2223  return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1);  in nvme_fc_init_hctx()
     2228  unsigned int hctx_idx)  in nvme_fc_init_admin_hctx() argument
     2230  return __nvme_fc_init_hctx(hctx, data, hctx_idx);  in nvme_fc_init_admin_hctx()
|
D | pci.c |
      403  unsigned int hctx_idx)  in nvme_admin_init_hctx() argument
      408  WARN_ON(hctx_idx != 0);  in nvme_admin_init_hctx()
      416  unsigned int hctx_idx)  in nvme_init_hctx() argument
      419  struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];  in nvme_init_hctx()
      421  WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);  in nvme_init_hctx()
      427  struct request *req, unsigned int hctx_idx,  in nvme_pci_init_request() argument
|
D | apple.c |
      776  unsigned int hctx_idx)  in apple_nvme_init_hctx() argument
      783  struct request *req, unsigned int hctx_idx,  in apple_nvme_init_request() argument
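Note: the NVMe host hits in loop.c, rdma.c, tcp.c, fc.c and pci.c all show the same pattern: ctrl->queues[0] (or dev->queues[0]) is the admin queue, so an I/O tag set's hardware queue hctx_idx is translated to NVMe queue hctx_idx + 1, while the admin tag set's single hardware queue (hctx_idx 0) maps to queue 0. A hedged sketch of that mapping follows; the example_* names are illustrative stand-ins, not the actual per-transport symbols.

    #include <linux/blk-mq.h>

    /* Illustrative per-controller types; real drivers use their own. */
    struct example_queue { int qid; };
    struct example_ctrl  { struct example_queue *queues; };

    static int example_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                                 unsigned int hctx_idx)
    {
            struct example_ctrl *ctrl = data;
            /* I/O hardware queue hctx_idx corresponds to NVMe queue
             * hctx_idx + 1, because queue 0 is the admin queue. */
            struct example_queue *queue = &ctrl->queues[hctx_idx + 1];

            hctx->driver_data = queue;      /* read back later in ->queue_rq() */
            return 0;
    }

    static int example_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                                       unsigned int hctx_idx)
    {
            struct example_ctrl *ctrl = data;

            /* The admin tag set has a single hardware queue, hctx_idx == 0. */
            hctx->driver_data = &ctrl->queues[0];
            return 0;
    }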
|
/linux-6.1.9/drivers/md/ |
D | dm-rq.c | 455 unsigned int hctx_idx, unsigned int numa_node) in dm_mq_init_request() argument
|
/linux-6.1.9/drivers/mtd/ubi/ |
D | block.c | 336 struct request *req, unsigned int hctx_idx, in ubiblock_init_request() argument
|
/linux-6.1.9/drivers/scsi/ |
D | scsi_lib.c |
     1809  unsigned int hctx_idx, unsigned int numa_node)  in scsi_mq_init_request() argument
     1837  unsigned int hctx_idx)  in scsi_mq_exit_request() argument
     1859  unsigned int hctx_idx)  in scsi_init_hctx() argument
|
/linux-6.1.9/drivers/block/null_blk/ |
D | main.c |
     1704  static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in null_exit_hctx() argument
     1722  unsigned int hctx_idx)  in null_init_hctx() argument
     1732  nq = &nullb->queues[hctx_idx];  in null_init_hctx()
|
/linux-6.1.9/include/linux/ |
D | blk-mq.h | 734 unsigned int hctx_idx);
|
/linux-6.1.9/drivers/block/ |
D | ublk_drv.c |
      877  unsigned int hctx_idx)  in ublk_init_hctx() argument
      887  unsigned int hctx_idx, unsigned int numa_node)  in ublk_init_rq() argument
|
D | nbd.c | 1724 unsigned int hctx_idx, unsigned int numa_node) in nbd_init_request() argument
|
/linux-6.1.9/drivers/block/mtip32xx/ |
D | mtip32xx.c |
     3333  unsigned int hctx_idx)  in mtip_free_cmd() argument
     3346  unsigned int hctx_idx, unsigned int numa_node)  in mtip_init_cmd() argument
|