/linux-6.6.21/block/
blk-mq.c
  612   blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)  in blk_mq_alloc_request_hctx() argument
  640   if (hctx_idx >= q->nr_hw_queues)  in blk_mq_alloc_request_hctx()
  652   data.hctx = xa_load(&q->hctx_table, hctx_idx);  in blk_mq_alloc_request_hctx()
  3284  unsigned int hctx_idx)  in blk_mq_free_rqs() argument
  3295  drv_tags = set->tags[hctx_idx];  in blk_mq_free_rqs()
  3305  set->ops->exit_request(set, rq, hctx_idx);  in blk_mq_free_rqs()
  3335  unsigned int hctx_idx)  in hctx_idx_to_type() argument
  3343  if (hctx_idx >= start && hctx_idx < end)  in hctx_idx_to_type()
  3354  unsigned int hctx_idx)  in blk_mq_get_hctx_node() argument
  3356  enum hctx_type type = hctx_idx_to_type(set, hctx_idx);  in blk_mq_get_hctx_node()
  [all …]
blk-mq.h
  59    unsigned int hctx_idx);
  62    unsigned int hctx_idx, unsigned int depth);
  65    unsigned int hctx_idx);
blk-mq-sched.c
  384   unsigned int hctx_idx)  in blk_mq_sched_alloc_map_and_rqs() argument
  391   hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,  in blk_mq_sched_alloc_map_and_rqs()
bsg-lib.c
  302   unsigned int hctx_idx, unsigned int numa_node)  in bsg_init_rq() argument
  313   unsigned int hctx_idx)  in bsg_exit_rq() argument
kyber-iosched.c
  464   static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in kyber_init_hctx() argument
  519   static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in kyber_exit_hctx() argument
mq-deadline.c
  657   static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in dd_init_hctx() argument
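The block-core and scheduler hits above show where hctx_idx originates: it indexes the per-hardware-queue tag arrays (set->tags[hctx_idx], hctx->sched_tags) and is forwarded into the driver's .init_request/.exit_request callbacks together with the NUMA node that queue maps to. A minimal sketch of the driver side of that contract, assuming a hypothetical driver (the mydrv_* names and the scratch buffer are invented, not taken from the tree):

#include <linux/blk-mq.h>
#include <linux/slab.h>

/*
 * Per-request payload; the tag set must be registered with
 * tag_set.cmd_size = sizeof(struct mydrv_cmd) for blk_mq_rq_to_pdu() to work.
 */
struct mydrv_cmd {
	void *scratch;
};

static int mydrv_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	/*
	 * hctx_idx identifies the hardware queue this request slot belongs to;
	 * numa_node is the node that queue maps to, so allocate node-locally.
	 */
	cmd->scratch = kzalloc_node(PAGE_SIZE, GFP_KERNEL, numa_node);
	return cmd->scratch ? 0 : -ENOMEM;
}

static void mydrv_exit_request(struct blk_mq_tag_set *set, struct request *rq,
			       unsigned int hctx_idx)
{
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	kfree(cmd->scratch);
}

These callbacks run once per preallocated request when a tag map is created or torn down, not per I/O, which is why blk_mq_free_rqs() above calls ->exit_request() for every request of the hardware queue being freed.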
/linux-6.6.21/drivers/nvme/target/
loop.c
  204   struct request *req, unsigned int hctx_idx,  in nvme_loop_init_request() argument
  213   (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);  in nvme_loop_init_request()
  219   unsigned int hctx_idx)  in nvme_loop_init_hctx() argument
  222   struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];  in nvme_loop_init_hctx()
  224   BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);  in nvme_loop_init_hctx()
  239   unsigned int hctx_idx)  in nvme_loop_init_admin_hctx() argument
  244   BUG_ON(hctx_idx != 0);  in nvme_loop_init_admin_hctx()
/linux-6.6.21/drivers/mmc/core/
queue.c
  204   unsigned int hctx_idx, unsigned int numa_node)  in mmc_mq_init_request() argument
  219   unsigned int hctx_idx)  in mmc_mq_exit_request() argument
/linux-6.6.21/drivers/nvme/host/
rdma.c
  286   struct request *rq, unsigned int hctx_idx)  in nvme_rdma_exit_request() argument
  294   struct request *rq, unsigned int hctx_idx,  in nvme_rdma_init_request() argument
  299   int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;  in nvme_rdma_init_request()
  320   unsigned int hctx_idx)  in nvme_rdma_init_hctx() argument
  323   struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];  in nvme_rdma_init_hctx()
  325   BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);  in nvme_rdma_init_hctx()
  332   unsigned int hctx_idx)  in nvme_rdma_init_admin_hctx() argument
  337   BUG_ON(hctx_idx != 0);  in nvme_rdma_init_admin_hctx()
tcp.c
  467   struct request *rq, unsigned int hctx_idx)  in nvme_tcp_exit_request() argument
  475   struct request *rq, unsigned int hctx_idx,  in nvme_tcp_init_request() argument
  481   int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;  in nvme_tcp_init_request()
  500   unsigned int hctx_idx)  in nvme_tcp_init_hctx() argument
  503   struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];  in nvme_tcp_init_hctx()
  510   unsigned int hctx_idx)  in nvme_tcp_init_admin_hctx() argument
fc.c
  1834  unsigned int hctx_idx)  in nvme_fc_exit_request() argument
  2142  unsigned int hctx_idx, unsigned int numa_node)  in nvme_fc_init_request() argument
  2146  int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;  in nvme_fc_init_request()
  2227  nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)  in nvme_fc_init_hctx() argument
  2229  return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1);  in nvme_fc_init_hctx()
  2234  unsigned int hctx_idx)  in nvme_fc_init_admin_hctx() argument
  2236  return __nvme_fc_init_hctx(hctx, data, hctx_idx);  in nvme_fc_init_admin_hctx()
pci.c
  396   unsigned int hctx_idx)  in nvme_admin_init_hctx() argument
  401   WARN_ON(hctx_idx != 0);  in nvme_admin_init_hctx()
  409   unsigned int hctx_idx)  in nvme_init_hctx() argument
  412   struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];  in nvme_init_hctx()
  414   WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);  in nvme_init_hctx()
  420   struct request *req, unsigned int hctx_idx,  in nvme_pci_init_request() argument
apple.c
  776   unsigned int hctx_idx)  in apple_nvme_init_hctx() argument
  783   struct request *req, unsigned int hctx_idx,  in apple_nvme_init_request() argument
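A pattern repeated across the NVMe transports above (loop, rdma, tcp, fc, pci, apple): controller queue 0 is the admin queue, so the I/O hardware context with index hctx_idx drives controller queue hctx_idx + 1, while the admin tag set always maps to queue 0. A reduced sketch of that numbering, assuming hypothetical my_ctrl/my_queue structures that only mirror the layout (the real drivers' types differ per transport):

#include <linux/blk-mq.h>

struct my_queue {
	int qid;			/* 0 = admin queue, 1.. = I/O queues */
};

struct my_ctrl {
	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;	/* I/O tag set */
	struct my_queue		*queues;	/* queue_count entries */
	unsigned int		queue_count;	/* admin + I/O queues */
};

static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			unsigned int hctx_idx)
{
	struct my_ctrl *ctrl = data;

	/* I/O hctx 0 maps to controller queue 1; queue 0 is reserved for admin. */
	if (WARN_ON(hctx_idx + 1 >= ctrl->queue_count))
		return -EINVAL;

	hctx->driver_data = &ctrl->queues[hctx_idx + 1];
	return 0;
}

static int my_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			      unsigned int hctx_idx)
{
	struct my_ctrl *ctrl = data;

	WARN_ON(hctx_idx != 0);		/* the admin tag set has a single hctx */
	hctx->driver_data = &ctrl->queues[0];
	return 0;
}

The same offset shows up in the init_request hits ((set == &ctrl->tag_set) ? hctx_idx + 1 : 0): a request belongs to queue hctx_idx + 1 when it comes from the I/O tag set and to queue 0 when it comes from the admin tag set.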
/linux-6.6.21/drivers/md/
dm-rq.c
  458   unsigned int hctx_idx, unsigned int numa_node)  in dm_mq_init_request() argument
/linux-6.6.21/drivers/mtd/ubi/
block.c
  314   struct request *req, unsigned int hctx_idx,  in ubiblock_init_request() argument
/linux-6.6.21/drivers/scsi/
scsi_lib.c
  1810  unsigned int hctx_idx, unsigned int numa_node)  in scsi_mq_init_request() argument
  1838  unsigned int hctx_idx)  in scsi_mq_exit_request() argument
  1860  unsigned int hctx_idx)  in scsi_init_hctx() argument
/linux-6.6.21/drivers/block/null_blk/
main.c
  1769  static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)  in null_exit_hctx() argument
  1787  unsigned int hctx_idx)  in null_init_hctx() argument
  1795  nq = &nullb->queues[hctx_idx];  in null_init_hctx()
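null_blk is the simplest consumer of the index: null_init_hctx() picks the per-device queue at nullb->queues[hctx_idx] and caches it in the hardware context, so the hot path never recomputes the mapping. A condensed sketch of that pattern with invented mydev_* names (not null_blk's actual structures):

#include <linux/blk-mq.h>
#include <linux/atomic.h>

struct mydev_queue {
	atomic_t inflight;		/* example of per-hardware-queue state */
};

struct mydev {
	struct mydev_queue *queues;	/* one entry per hardware queue */
};

static int mydev_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
			   unsigned int hctx_idx)
{
	struct mydev *dev = driver_data;

	/*
	 * Cache the per-queue context so queue_rq() reads hctx->driver_data
	 * instead of re-deriving it from hctx_idx on every request.
	 */
	hctx->driver_data = &dev->queues[hctx_idx];
	return 0;
}

static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct mydev_queue *q = hctx->driver_data;

	blk_mq_start_request(bd->rq);
	atomic_inc(&q->inflight);
	/* ... drive the request, later complete it with blk_mq_end_request() ... */
	return BLK_STS_OK;
}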
/linux-6.6.21/include/linux/
blk-mq.h
  730   unsigned int hctx_idx);
/linux-6.6.21/drivers/block/mtip32xx/
mtip32xx.c
  3333  unsigned int hctx_idx)  in mtip_free_cmd() argument
  3346  unsigned int hctx_idx, unsigned int numa_node)  in mtip_init_cmd() argument
/linux-6.6.21/drivers/block/
ublk_drv.c
  1321  unsigned int hctx_idx)  in ublk_init_hctx() argument
nbd.c
  1771  unsigned int hctx_idx, unsigned int numa_node)  in nbd_init_request() argument