
Searched refs:msrq (Results 1 – 10 of 10) sorted by relevance

/linux-6.6.21/drivers/infiniband/hw/mlx4/
srq.c
44 return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); in get_wqe()
100 srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1); in mlx4_ib_create_srq()
101 srq->msrq.max_gs = init_attr->attr.max_sge; in mlx4_ib_create_srq()
105 srq->msrq.max_gs * in mlx4_ib_create_srq()
107 srq->msrq.wqe_shift = ilog2(desc_size); in mlx4_ib_create_srq()
109 buf_size = srq->msrq.max * desc_size; in mlx4_ib_create_srq()
149 srq->tail = srq->msrq.max - 1; in mlx4_ib_create_srq()
152 for (i = 0; i < srq->msrq.max; ++i) { in mlx4_ib_create_srq()
155 cpu_to_be16((i + 1) & (srq->msrq.max - 1)); in mlx4_ib_create_srq()
172 srq->wrid = kvmalloc_array(srq->msrq.max, in mlx4_ib_create_srq()
[all …]
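
The srq.c hits above size the SRQ ring and chain its entries into a circular free list: the depth is rounded up to a power of two, the stride comes from ilog2() of the descriptor size, and every entry's next_wqe_index points at the following slot with a power-of-two wrap. Below is a minimal userspace sketch of that arithmetic, not the driver code; fake_srq_wqe and the 64-byte descriptor size are invented stand-ins for the real mlx4 structures.

/*
 * Sketch only: ring sizing and circular free-list linking, mirroring
 * the mlx4_ib_create_srq() hits above with simplified types.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_srq_wqe {
	uint16_t next_wqe_index;   /* stands in for the hardware "next" segment */
};

static unsigned int roundup_pow_of_two_u(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int ilog2_u(unsigned int n)
{
	unsigned int s = 0;

	while (n >>= 1)
		s++;
	return s;
}

int main(void)
{
	unsigned int max_wr = 100;                            /* consumer's requested depth */
	unsigned int max = roundup_pow_of_two_u(max_wr + 1);  /* ring depth, power of two */
	unsigned int desc_size = 64;                          /* assumed stride for the sketch */
	unsigned int wqe_shift = ilog2_u(desc_size);          /* WQE n lives at n << wqe_shift */
	unsigned int buf_size = max * desc_size;
	unsigned int tail = max - 1;                          /* last free slot, as in the driver */
	struct fake_srq_wqe *ring = calloc(max, sizeof(*ring));
	unsigned int i;

	if (!ring)
		return 1;

	/* chain slot i to slot i + 1, wrapping with the power-of-two mask */
	for (i = 0; i < max; i++)
		ring[i].next_wqe_index = (uint16_t)((i + 1) & (max - 1));

	printf("max=%u wqe_shift=%u buf_size=%u tail=%u wrap=%u\n",
	       max, wqe_shift, buf_size, tail,
	       (unsigned int)ring[max - 1].next_wqe_index);
	free(ring);
	return 0;
}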
mlx4_ib.h
376 struct mlx4_srq msrq; member
730 static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq) in to_mibsrq() argument
732 return container_of(msrq, struct mlx4_ib_srq, msrq); in to_mibsrq()
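to_mibsrq() in these hits is the usual container_of() conversion: mlx4_ib_srq embeds the core object as its msrq member, so a pointer to that member maps back to the wrapping driver struct. A self-contained sketch of the pattern; fake_core_srq and fake_ib_srq are placeholder structs, not the real mlx4 definitions.

/* Sketch only: the container_of conversion behind to_mibsrq(). */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_core_srq {
	int srqn;                      /* SRQ number, as carried in completions */
};

struct fake_ib_srq {
	int driver_state;              /* placeholder for driver-private fields */
	struct fake_core_srq msrq;     /* embedded core object, like mlx4_ib_srq.msrq */
};

static struct fake_ib_srq *to_mibsrq(struct fake_core_srq *msrq)
{
	return container_of(msrq, struct fake_ib_srq, msrq);
}

int main(void)
{
	struct fake_ib_srq s = { .driver_state = 7, .msrq = { .srqn = 42 } };
	struct fake_core_srq *core = &s.msrq;   /* what lookup helpers hand back */

	printf("driver_state=%d srqn=%d\n",
	       to_mibsrq(core)->driver_state, core->srqn);
	return 0;
}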
cq.c
664 struct mlx4_srq *msrq = NULL; in mlx4_ib_poll_one() local
727 msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev, in mlx4_ib_poll_one()
744 } else if (msrq) { in mlx4_ib_poll_one()
745 srq = to_mibsrq(msrq); in mlx4_ib_poll_one()
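In the poll path above, a completion that arrived through an SRQ is first resolved by number and only then converted to the driver wrapper with to_mibsrq(). A toy sketch of that control flow; fake_srq_lookup() and the fixed-size table are invented placeholders for mlx4_srq_lookup() and the device's SRQ table.

/* Sketch only: resolve the core SRQ from the number in the CQE. */
#include <stdio.h>

struct fake_core_srq { unsigned int srqn; };

#define MAX_SRQS 8
static struct fake_core_srq *srq_table[MAX_SRQS];   /* stand-in for the device SRQ table */

static struct fake_core_srq *fake_srq_lookup(unsigned int srqn)
{
	return srqn < MAX_SRQS ? srq_table[srqn] : NULL;
}

static void poll_one(unsigned int cqe_srqn)
{
	struct fake_core_srq *msrq = NULL;

	msrq = fake_srq_lookup(cqe_srqn);    /* like mlx4_srq_lookup(dev, srqn) */
	if (msrq)
		printf("completion consumed a WQE from SRQ %u\n", msrq->srqn);
	else
		printf("no SRQ registered under number %u\n", cqe_srqn);
}

int main(void)
{
	static struct fake_core_srq s = { .srqn = 3 };

	srq_table[3] = &s;
	poll_one(3);
	poll_one(5);
	return 0;
}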
qp.c
2433 to_msrq(ibsrq)->msrq.srqn); in __mlx4_ib_modify_qp()
/linux-6.6.21/drivers/infiniband/hw/mlx5/
srq.c
125 mlx5_init_fbc(srq->buf.frags, srq->msrq.wqe_shift, ilog2(srq->msrq.max), in create_srq_kernel()
129 srq->tail = srq->msrq.max - 1; in create_srq_kernel()
132 for (i = 0; i < srq->msrq.max; i++) { in create_srq_kernel()
135 cpu_to_be16((i + 1) & (srq->msrq.max - 1)); in create_srq_kernel()
146 srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL); in create_srq_kernel()
218 srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1); in mlx5_ib_create_srq()
219 srq->msrq.max_gs = init_attr->attr.max_sge; in mlx5_ib_create_srq()
222 srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); in mlx5_ib_create_srq()
223 if (desc_size == 0 || srq->msrq.max_gs > desc_size) in mlx5_ib_create_srq()
231 srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / in mlx5_ib_create_srq()
[all …]
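
The mlx5 create path computes the ring depth and the per-WQE descriptor size from the requested SGE count, then reports how many gather entries fit as max_avail_gather. A hedged sketch of that arithmetic follows; the 16-byte segment sizes are assumptions for illustration (the real values are the sizes of struct mlx5_wqe_srq_next_seg and struct mlx5_wqe_data_seg), and the divisor in the truncated hit is assumed to be the data-segment size.

/* Sketch only: descriptor-size arithmetic mirroring mlx5_ib_create_srq(). */
#include <stdio.h>

/* assumed sizes for illustration; not the real mlx5 struct sizes */
#define FAKE_SRQ_NEXT_SEG_SIZE 16u
#define FAKE_DATA_SEG_SIZE     16u

static unsigned int roundup_pow_of_two_u(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int max_wr = 500, max_sge = 3;
	unsigned int max = roundup_pow_of_two_u(max_wr + 1);   /* ring depth */
	unsigned int max_gs = max_sge;
	unsigned int desc_size = FAKE_SRQ_NEXT_SEG_SIZE + max_gs * FAKE_DATA_SEG_SIZE;

	/* sanity guard mirroring the check in the hits */
	if (desc_size == 0 || max_gs > desc_size)
		return 1;

	/* how many gather entries the descriptor can actually hold */
	unsigned int max_avail_gather =
		(desc_size - FAKE_SRQ_NEXT_SEG_SIZE) / FAKE_DATA_SEG_SIZE;

	printf("max=%u max_gs=%u desc_size=%u max_avail_gather=%u\n",
	       max, max_gs, desc_size, max_avail_gather);
	return 0;
}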
cq.c
179 struct mlx5_core_srq *msrq = NULL; in handle_responder() local
182 msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn)); in handle_responder()
183 if (msrq) in handle_responder()
184 srq = to_mibsrq(msrq); in handle_responder()
192 if (msrq) in handle_responder()
193 mlx5_core_res_put(&msrq->common); in handle_responder()
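handle_responder() in these hits takes a reference on the core SRQ it looks up from the CQE's srqn and drops it with mlx5_core_res_put() once the completion has been processed. A toy sketch of that get/put pairing; the refcount plumbing is invented for illustration and ignores the real locking and RCU details.

/* Sketch only: referenced lookup followed by a matching put. */
#include <stdio.h>

struct fake_core_srq {
	unsigned int srqn;
	int refcount;                 /* toy counter; the driver uses its own refcounted common struct */
};

static struct fake_core_srq the_srq = { .srqn = 9, .refcount = 1 };

static struct fake_core_srq *fake_get_srq(unsigned int srqn)
{
	if (srqn != the_srq.srqn)
		return NULL;
	the_srq.refcount++;           /* lookup hands back a referenced object */
	return &the_srq;
}

static void fake_res_put(struct fake_core_srq *s)
{
	s->refcount--;                /* caller drops the reference when done */
}

static void handle_responder(unsigned int cqe_srqn)
{
	struct fake_core_srq *msrq = fake_get_srq(cqe_srqn);

	if (msrq)
		printf("completion on SRQ %u (refcount now %d)\n",
		       msrq->srqn, msrq->refcount);

	/* ... consume the WQE, record wr_id ... */

	if (msrq)
		fake_res_put(msrq);
}

int main(void)
{
	handle_responder(9);
	handle_responder(4);
	printf("final refcount=%d\n", the_srq.refcount);
	return 0;
}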
mlx5_ib.h
592 struct mlx5_core_srq msrq; member
1232 static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq) in to_mibsrq() argument
1234 return container_of(msrq, struct mlx5_ib_srq, msrq); in to_mibsrq()
qp.c
293 srq->msrq.max, srq->msrq.wqe_shift, in mlx5_ib_read_user_wqe_srq()
306 size_t wqe_size = 1 << srq->msrq.wqe_shift; in mlx5_ib_read_wqe_srq()
2034 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); in create_xrc_tgt_qp()
2166 to_msrq(init_attr->srq)->msrq.srqn); in create_dci()
2170 to_msrq(devr->s1)->msrq.srqn); in create_dci()
2358 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); in create_user_qp()
2363 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn); in create_user_qp()
2366 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn); in create_user_qp()
2506 to_msrq(attr->srq)->msrq.srqn); in create_kernel_qp()
2510 to_msrq(devr->s1)->msrq.srqn); in create_kernel_qp()
[all …]
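
The qp.c hits all program an SRQ number into the QP context's srqn_rmpn_xrqn field, taking it either from the SRQ attached at create time or from a device-level fallback (devr->s0/s1). A minimal sketch of that selection; fake_set_srqn() stands in for MLX5_SET(), and devr_s1_srqn is a made-up placeholder for to_msrq(devr->s1)->msrq.srqn.

/* Sketch only: pick the SRQ number and write it into a fake QP context. */
#include <stdio.h>

struct fake_qpc {
	unsigned int srqn_rmpn_xrqn;   /* field the hits program via MLX5_SET() */
};

static void fake_set_srqn(struct fake_qpc *qpc, unsigned int srqn)
{
	qpc->srqn_rmpn_xrqn = srqn;
}

int main(void)
{
	struct fake_qpc qpc = { 0 };
	int have_attached_srq = 1;            /* did the caller pass an SRQ at create time? */
	unsigned int attached_srqn = 17;      /* srqn of that SRQ */
	unsigned int devr_s1_srqn = 2;        /* placeholder device-level fallback */

	if (have_attached_srq)
		fake_set_srqn(&qpc, attached_srqn);
	else
		fake_set_srqn(&qpc, devr_s1_srqn);

	printf("qpc.srqn_rmpn_xrqn=%u\n", qpc.srqn_rmpn_xrqn);
	return 0;
}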
odp.c
1118 int wqe_size = 1 << srq->msrq.wqe_shift; in mlx5_ib_mr_responder_pfault_handler_srq()
1187 struct mlx5_core_srq *msrq = in res_to_srq() local
1190 return to_mibsrq(msrq); in res_to_srq()
devx.c
608 struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq); in devx_is_valid_obj_id()
626 to_msrq(uobj->object)->msrq.srqn) == in devx_is_valid_obj_id()