
Searched refs:sqsize (Results 1 – 18 of 18) sorted by relevance

/linux-6.1.9/drivers/nvme/target/
fabrics-cmd.c 141 u16 sqsize = le16_to_cpu(c->sqsize); in nvmet_install_queue() local
146 if (!sqsize) { in nvmet_install_queue()
148 req->error_loc = offsetof(struct nvmf_connect_command, sqsize); in nvmet_install_queue()
149 req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); in nvmet_install_queue()
160 if (sqsize > mqes) { in nvmet_install_queue()
162 sqsize, mqes, ctrl->cntlid); in nvmet_install_queue()
163 req->error_loc = offsetof(struct nvmf_connect_command, sqsize); in nvmet_install_queue()
164 req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); in nvmet_install_queue()
176 nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1); in nvmet_install_queue()
177 nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1); in nvmet_install_queue()
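Note: the nvmet_install_queue() hits above show the target-side convention for sqsize: the value carried in the connect command is 0's based, zero is rejected, anything above the controller's MQES is rejected, and the CQ/SQ are then created with sqsize + 1 entries. A minimal user-space sketch of those checks (plain C; the MQES constant and the names here are illustrative, not the kernel's definitions):

/* sqsize is 0's based: 0 is invalid, it must not exceed MQES,
 * and the queue is created with sqsize + 1 entries. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MQES 127                /* assumed controller MQES (0's based) */

static int install_queue(uint16_t sqsize, uint16_t *entries)
{
    if (!sqsize)                        /* a 0's based size of 0 is never valid */
        return -1;
    if (sqsize > EXAMPLE_MQES)          /* must fit the controller's limit */
        return -1;
    *entries = sqsize + 1;              /* convert to an actual entry count */
    return 0;
}

int main(void)
{
    uint16_t entries;

    if (install_queue(31, &entries) == 0)
        printf("queue created with %u entries\n", entries);
    return 0;
}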
fc.c 132 u16 sqsize; member
633 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_prep_fcp_iodlist()
668 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_destroy_fcp_iodlist()
792 u16 qid, u16 sqsize) in nvmet_fc_alloc_target_queue() argument
800 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); in nvmet_fc_alloc_target_queue()
814 queue->sqsize = sqsize; in nvmet_fc_alloc_target_queue()
896 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_delete_target_queue()
1676 be16_to_cpu(rqst->assoc_cmd.sqsize))) in nvmet_fc_ls_create_association()
1687 be16_to_cpu(rqst->assoc_cmd.sqsize)); in nvmet_fc_ls_create_association()
1765 be16_to_cpu(rqst->connect_cmd.sqsize))) in nvmet_fc_ls_create_connection()
[all …]
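Note: the fc.c hits above allocate the target queue with struct_size(queue, fod, sqsize) and then walk queue->sqsize I/O descriptors, i.e. one fod slot per submission queue entry. A user-space sketch of that flexible-array-member pattern (the struct names are stand-ins, not the driver's types):

#include <stdint.h>
#include <stdlib.h>

struct example_fod {                    /* stands in for the per-command descriptor */
    int active;
};

struct example_queue {                  /* stands in for the target queue */
    uint16_t qid;
    uint16_t sqsize;
    struct example_fod fod[];           /* flexible array: one slot per SQ entry */
};

static struct example_queue *alloc_queue(uint16_t qid, uint16_t sqsize)
{
    /* equivalent of kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL) */
    size_t bytes = sizeof(struct example_queue) +
                   (size_t)sqsize * sizeof(struct example_fod);
    struct example_queue *q = calloc(1, bytes);

    if (q) {
        q->qid = qid;
        q->sqsize = sqsize;
    }
    return q;
}

int main(void)
{
    struct example_queue *q = alloc_queue(1, 32);

    if (!q)
        return 1;
    free(q);
    return 0;
}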
trace.c 157 u16 sqsize = get_unaligned_le16(spc + 4); in nvmet_trace_fabrics_connect() local
162 recfmt, qid, sqsize, cattr, kato); in nvmet_trace_fabrics_connect()
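Note: both trace helpers (here and in the host's trace.c below) pull sqsize out of the raw connect-command bytes with get_unaligned_le16(spc + 4). A user-space sketch of such an unaligned little-endian read, using memcpy so no alignment is assumed (the buffer contents are made up for the example):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint16_t read_le16(const uint8_t *p)
{
    uint16_t v;

    memcpy(&v, p, sizeof(v));                  /* byte-wise copy, alignment-safe */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    v = (uint16_t)((v >> 8) | (v << 8));       /* wire format is little-endian */
#endif
    return v;
}

int main(void)
{
    uint8_t spc[16] = { 0 };                   /* command-specific bytes (made up) */

    spc[4] = 0x7f;                             /* a 0's based sqsize of 127 at offset 4 */
    printf("sqsize = %u\n", read_le16(spc + 4));
    return 0;
}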
loop.c 578 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_loop_create_ctrl()
/linux-6.1.9/include/linux/
nvme-fc.h 260 __be16 sqsize; member
288 __be16 sqsize; member
nvme.h 1478 __le16 sqsize; member
/linux-6.1.9/drivers/nvme/host/
rdma.c 777 ctrl->ctrl.sqsize + 1); in nvme_rdma_alloc_io_queues()
1062 if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) { in nvme_rdma_setup_ctrl()
1065 ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1); in nvme_rdma_setup_ctrl()
1068 if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) { in nvme_rdma_setup_ctrl()
1071 ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE); in nvme_rdma_setup_ctrl()
1072 ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1; in nvme_rdma_setup_ctrl()
1075 if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { in nvme_rdma_setup_ctrl()
1078 ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd); in nvme_rdma_setup_ctrl()
1079 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; in nvme_rdma_setup_ctrl()
1894 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); in nvme_rdma_route_resolved()
[all …]
trace.c 269 u16 sqsize = get_unaligned_le16(spc + 4); in nvme_trace_fabrics_connect() local
274 recfmt, qid, sqsize, cattr, kato); in nvme_trace_fabrics_connect()
fabrics.c 383 cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1); in nvmf_connect_admin_queue()
467 cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize); in nvmf_connect_io_queue()
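Note: the fabrics.c hits show the wire-level convention: the connect command carries a 0's based sqsize, NVME_AQ_DEPTH - 1 for the admin queue and ctrl->sqsize for I/O queues (which the transports set from opts->queue_size - 1, as in loop.c, fc.c and tcp.c). A small sketch of that conversion (the depth values and the struct below are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_AQ_DEPTH 32                  /* assumed admin queue depth */

struct connect_cmd {
    uint16_t sqsize;                         /* 0's based, little-endian on the wire */
};

int main(void)
{
    uint16_t queue_size = 128;               /* e.g. the user's queue_size option */
    struct connect_cmd admin = { .sqsize = EXAMPLE_AQ_DEPTH - 1 };
    struct connect_cmd io = { .sqsize = (uint16_t)(queue_size - 1) };

    printf("admin sqsize=%u, io sqsize=%u\n", admin.sqsize, io.sqsize);
    return 0;
}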
fc.c 1217 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1); in nvme_fc_connect_admin_queue()
1339 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1); in nvme_fc_connect_queue()
2912 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_create_io_queues()
2916 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_create_io_queues()
2972 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_recreate_io_queues()
2976 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_recreate_io_queues()
3138 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_fc_create_association()
3499 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_fc_init_ctrl()
tcp.c 2050 if (opts->queue_size > ctrl->sqsize + 1) in nvme_tcp_setup_ctrl()
2053 opts->queue_size, ctrl->sqsize + 1); in nvme_tcp_setup_ctrl()
2055 if (ctrl->sqsize + 1 > ctrl->maxcmd) { in nvme_tcp_setup_ctrl()
2058 ctrl->sqsize + 1, ctrl->maxcmd); in nvme_tcp_setup_ctrl()
2059 ctrl->sqsize = ctrl->maxcmd - 1; in nvme_tcp_setup_ctrl()
2568 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_tcp_create_ctrl()
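Note: the tcp.c hits (like the rdma.c ones above) cap sqsize against the controller's advertised maxcmd so that sqsize + 1 never exceeds it, warning when the requested queue_size was larger. A sketch of that clamp with made-up values:

#include <stdint.h>
#include <stdio.h>

static uint16_t clamp_sqsize(uint16_t sqsize, uint16_t maxcmd)
{
    if (sqsize + 1 > maxcmd)        /* more slots than the controller can accept */
        sqsize = maxcmd - 1;        /* keep sqsize 0's based against maxcmd */
    return sqsize;
}

int main(void)
{
    printf("clamped sqsize = %u\n", clamp_sqsize(127, 64));   /* prints 63 */
    return 0;
}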
nvme.h 295 u16 sqsize; member
core.c 3262 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); in nvme_init_ctrl_finish()
3523 nvme_show_int_function(sqsize);
4906 set->queue_depth = ctrl->sqsize + 1; in nvme_alloc_io_tag_set()
apple.c 1085 anv->ctrl.sqsize = in apple_nvme_reset_work()
pci.c 2608 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ in nvme_pci_enable()
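Note: core.c caps sqsize with the MQES field of the CAP register and nvme_alloc_io_tag_set() then sizes the blk-mq tag set as sqsize + 1; pci.c likewise derives sqsize from its queue depth minus one. A sketch of that relationship (the CAP mask below follows the usual NVMe layout of MQES in bits 15:0, but is written here purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define CAP_MQES(cap) ((uint16_t)((cap) & 0xffff))   /* 0's based max queue entries */

int main(void)
{
    uint64_t cap = 0x3ff;            /* illustrative CAP value: MQES = 1023 */
    uint16_t sqsize = 2047;          /* requested size (0's based) */

    if (CAP_MQES(cap) < sqsize)      /* min_t(u16, MQES, sqsize) */
        sqsize = CAP_MQES(cap);

    printf("tag set queue_depth = %u\n", sqsize + 1);
    return 0;
}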
/linux-6.1.9/drivers/infiniband/hw/irdma/
hw.c 904 u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048; in irdma_create_cqp() local
912 cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL); in irdma_create_cqp()
916 cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL); in irdma_create_cqp()
924 cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize, in irdma_create_cqp()
943 cqp_init_info.sq_size = sqsize; in irdma_create_cqp()
981 for (i = 0; i < sqsize; i++) { in irdma_create_cqp()
/linux-6.1.9/drivers/infiniband/hw/cxgb4/
qp.c 2119 unsigned int sqsize, rqsize = 0; in c4iw_create_qp() local
2149 sqsize = attrs->cap.max_send_wr + 1; in c4iw_create_qp()
2150 if (sqsize < 8) in c4iw_create_qp()
2151 sqsize = 8; in c4iw_create_qp()
2157 qhp->wq.sq.size = sqsize; in c4iw_create_qp()
2159 (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * in c4iw_create_qp()
2183 attrs->cap.max_send_wr = sqsize - 1; in c4iw_create_qp()
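Note: in the cxgb4 hits sqsize is an actual entry count rather than 0's based: the driver takes the requested max_send_wr, adds one spare slot, enforces a floor of 8 entries, and then reports max_send_wr = sqsize - 1 back to the caller. A plain-C sketch of that sizing:

#include <stdio.h>

int main(void)
{
    unsigned int max_send_wr = 3;             /* requested by the caller */
    unsigned int sqsize = max_send_wr + 1;    /* one extra slot */

    if (sqsize < 8)
        sqsize = 8;                           /* minimum enforced by the driver */

    printf("sq size=%u, reported max_send_wr=%u\n", sqsize, sqsize - 1);
    return 0;
}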
/linux-6.1.9/drivers/infiniband/sw/rdmavt/
qp.c 1041 size_t sqsize; in rvt_create_qp() local
1067 sqsize = in rvt_create_qp()
1081 swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node); in rvt_create_qp()
1155 qp->s_size = sqsize; in rvt_create_qp()