
Searched refs:srq (Results 1 – 25 of 106) sorted by relevance


/linux-6.1.9/drivers/infiniband/hw/mthca/
mthca_srq.c
74 static void *get_wqe(struct mthca_srq *srq, int n) in get_wqe() argument
76 if (srq->is_direct) in get_wqe()
77 return srq->queue.direct.buf + (n << srq->wqe_shift); in get_wqe()
79 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + in get_wqe()
80 ((n << srq->wqe_shift) & (PAGE_SIZE - 1)); in get_wqe()
99 struct mthca_srq *srq, in mthca_tavor_init_srq_context() argument
108 context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4)); in mthca_tavor_init_srq_context()
110 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); in mthca_tavor_init_srq_context()
120 struct mthca_srq *srq, in mthca_arbel_init_srq_context() argument
134 max = srq->max; in mthca_arbel_init_srq_context()
[all …]
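The mthca get_wqe() above turns a WQE index into an address: each entry is 1 << wqe_shift bytes, so the byte offset is n << wqe_shift; a direct queue adds that offset to one contiguous buffer, while an indirect queue splits it into a page index (offset >> PAGE_SHIFT) and an in-page offset (offset & (PAGE_SIZE - 1)). A minimal userspace sketch of the indirect case, with illustrative struct names rather than the driver's types:

/* Sketch of the mthca-style indirect WQE lookup: a queue of fixed-size
 * entries (1 << wqe_shift bytes) backed by a list of page-sized buffers.
 * The direct case would simply be base + (n << wqe_shift). */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct wqe_queue {
	int   wqe_shift;     /* log2 of the WQE size */
	void **page_list;    /* one buffer per page */
};

static void *get_wqe(struct wqe_queue *q, int n)
{
	unsigned long off = (unsigned long)n << q->wqe_shift;

	/* high bits pick the page, low bits the offset inside it */
	return (char *)q->page_list[off >> PAGE_SHIFT] + (off & (PAGE_SIZE - 1));
}

int main(void)
{
	struct wqe_queue q = { .wqe_shift = 6 };   /* 64-byte WQEs */
	void *pages[2] = { malloc(PAGE_SIZE), malloc(PAGE_SIZE) };

	q.page_list = pages;
	/* entry 70: 70 * 64 = 4480 bytes in -> page 1, offset 384 */
	printf("wqe 70 -> page 1 + %lu\n",
	       (unsigned long)((char *)get_wqe(&q, 70) - (char *)pages[1]));
	free(pages[0]);
	free(pages[1]);
	return 0;
}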
/linux-6.1.9/drivers/infiniband/hw/hns/
hns_roce_srq.c
15 struct hns_roce_srq *srq; in hns_roce_srq_event() local
18 srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1)); in hns_roce_srq_event()
19 if (srq) in hns_roce_srq_event()
20 refcount_inc(&srq->refcount); in hns_roce_srq_event()
23 if (!srq) { in hns_roce_srq_event()
28 srq->event(srq, event_type); in hns_roce_srq_event()
30 if (refcount_dec_and_test(&srq->refcount)) in hns_roce_srq_event()
31 complete(&srq->free); in hns_roce_srq_event()
34 static void hns_roce_ib_srq_event(struct hns_roce_srq *srq, in hns_roce_ib_srq_event() argument
37 struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device); in hns_roce_ib_srq_event()
[all …]
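hns_roce_srq_event() shows the usual lookup-then-pin pattern for async events: the SRQ is found in an xarray, refcount_inc() pins it across the callback, and the final refcount_dec_and_test() completes srq->free so the destroy path can wait until no event is in flight. A hedged userspace analogue using C11 atomics; the kernel uses refcount_t and struct completion, and the flag below merely stands in for the completion:

/* Refcount-protected event dispatch: the handler takes a reference
 * before using the object, and the last reference drop signals the
 * destroyer that no event is still running. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct srq_obj {
	atomic_int refcount;
	atomic_bool free_done;
	void (*event)(struct srq_obj *srq, int event_type);
};

static void srq_put(struct srq_obj *srq)
{
	/* fetch_sub returns the old value: 1 means we dropped the last ref */
	if (atomic_fetch_sub(&srq->refcount, 1) == 1)
		atomic_store(&srq->free_done, true);
}

static void srq_event(struct srq_obj *srq, int event_type)
{
	atomic_fetch_add(&srq->refcount, 1);   /* pin across the callback */
	srq->event(srq, event_type);
	srq_put(srq);
}

static void on_event(struct srq_obj *srq, int event_type)
{
	printf("srq event %d\n", event_type);
}

int main(void)
{
	struct srq_obj s = { .event = on_event };

	atomic_init(&s.refcount, 1);   /* creator's reference */
	srq_event(&s, 14);             /* e.g. an async limit event */
	srq_put(&s);                   /* destroy path drops its ref */
	printf("freed: %d\n", atomic_load(&s.free_done));
	return 0;
}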
/linux-6.1.9/drivers/infiniband/hw/mlx4/
srq.c
42 static void *get_wqe(struct mlx4_ib_srq *srq, int n) in get_wqe() argument
44 return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); in get_wqe()
47 static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type) in mlx4_ib_srq_event() argument
50 struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq; in mlx4_ib_srq_event()
54 event.element.srq = ibsrq; in mlx4_ib_srq_event()
64 "on SRQ %06x\n", type, srq->srqn); in mlx4_ib_srq_event()
79 struct mlx4_ib_srq *srq = to_msrq(ib_srq); in mlx4_ib_create_srq() local
98 mutex_init(&srq->mutex); in mlx4_ib_create_srq()
99 spin_lock_init(&srq->lock); in mlx4_ib_create_srq()
100 srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1); in mlx4_ib_create_srq()
[all …]
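mlx4_ib_create_srq() sizes the queue as roundup_pow_of_two(max_wr + 1): the +1 reserves one slot so a full ring and an empty ring are distinguishable from head/tail alone, and the power-of-two size turns index wraparound into a mask. A small sketch of why that still admits max_wr outstanding WRs; the helper is written from scratch here, the kernel provides its own roundup_pow_of_two():

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int max_wr = 100;
	unsigned int size = roundup_pow_of_two(max_wr + 1);   /* 128 */
	unsigned int head = 0, tail = 0;

	/* produce until "full": at most size - 1 entries ever fit */
	while (((head + 1) & (size - 1)) != tail)
		head = (head + 1) & (size - 1);

	printf("size %u holds %u WRs (>= max_wr %u)\n",
	       size, (head - tail) & (size - 1), max_wr);
	return 0;
}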
/linux-6.1.9/drivers/infiniband/hw/mlx5/
srq.c
13 static void *get_wqe(struct mlx5_ib_srq *srq, int n) in get_wqe() argument
15 return mlx5_frag_buf_get_wqe(&srq->fbc, n); in get_wqe()
18 static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type) in mlx5_ib_srq_event() argument
21 struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq; in mlx5_ib_srq_event()
25 event.element.srq = ibsrq; in mlx5_ib_srq_event()
35 type, srq->srqn); in mlx5_ib_srq_event()
43 static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, in create_srq_user() argument
76 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); in create_srq_user()
78 srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0); in create_srq_user()
79 if (IS_ERR(srq->umem)) { in create_srq_user()
[all …]
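create_srq_user() checks ib_umem_get() with IS_ERR()/PTR_ERR(), the kernel convention that lets a pointer-returning function carry a negative errno in the last page of the address space instead of returning NULL plus a separate code. The same pattern recurs in the rdmavt, pvrdma, and uverbs snippets below. A userspace re-implementation of the idea, for illustration only:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* errors live in the top MAX_ERRNO addresses */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_srq(int fail)
{
	static int object;

	if (fail)
		return ERR_PTR(-ENOMEM);   /* errno travels in the pointer */
	return &object;
}

int main(void)
{
	void *srq = create_srq(1);

	if (IS_ERR(srq))
		printf("create_srq failed: %ld\n", PTR_ERR(srq));   /* -12 */
	return 0;
}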
srq_cmd.c
84 struct mlx5_core_srq *srq; in mlx5_cmd_get_srq() local
87 srq = xa_load(&table->array, srqn); in mlx5_cmd_get_srq()
88 if (srq) in mlx5_cmd_get_srq()
89 refcount_inc(&srq->common.refcount); in mlx5_cmd_get_srq()
92 return srq; in mlx5_cmd_get_srq()
114 static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, in create_srq_cmd() argument
157 srq->srqn = MLX5_GET(create_srq_out, create_out, srqn); in create_srq_cmd()
158 srq->uid = in->uid; in create_srq_cmd()
164 static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq) in destroy_srq_cmd() argument
169 MLX5_SET(destroy_srq_in, in, srqn, srq->srqn); in destroy_srq_cmd()
[all …]
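create_srq_cmd()/destroy_srq_cmd() build firmware commands with MLX5_SET()/MLX5_GET(), which write and read named fields of a packed command layout. A hypothetical miniature of that idea; the real macros derive offsets from generated layout descriptions, and the field table below is invented:

#include <stdio.h>
#include <stdint.h>

struct field { unsigned int word, shift, width; };

/* invented layout: pretend srqn lives in word 1, bits 0..23 */
static const struct field create_srq_out_srqn = { 1, 0, 24 };

static void set_field(uint32_t *buf, struct field f, uint32_t val)
{
	uint32_t mask = (f.width == 32) ? ~0u
				       : ((1u << f.width) - 1) << f.shift;

	buf[f.word] = (buf[f.word] & ~mask) | ((val << f.shift) & mask);
}

static uint32_t get_field(const uint32_t *buf, struct field f)
{
	uint32_t mask = (f.width == 32) ? ~0u : (1u << f.width) - 1;

	return (buf[f.word] >> f.shift) & mask;
}

int main(void)
{
	uint32_t out[4] = { 0 };

	set_field(out, create_srq_out_srqn, 0x1a2b3c);
	printf("srqn = %#x\n", (unsigned)get_field(out, create_srq_out_srqn));
	return 0;
}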
srq.h
48 void (*event)(struct mlx5_core_srq *srq, enum mlx5_event e);
58 int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
60 int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
61 int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
63 int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
/linux-6.1.9/drivers/infiniband/sw/rdmavt/
srq.c
38 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); in rvt_create_srq() local
54 srq->rq.size = srq_init_attr->attr.max_wr + 1; in rvt_create_srq()
55 srq->rq.max_sge = srq_init_attr->attr.max_sge; in rvt_create_srq()
56 sz = sizeof(struct ib_sge) * srq->rq.max_sge + in rvt_create_srq()
58 if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz, in rvt_create_srq()
69 u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz; in rvt_create_srq()
71 srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq); in rvt_create_srq()
72 if (IS_ERR(srq->ip)) { in rvt_create_srq()
73 ret = PTR_ERR(srq->ip); in rvt_create_srq()
77 ret = ib_copy_to_udata(udata, &srq->ip->offset, in rvt_create_srq()
[all …]
/linux-6.1.9/drivers/infiniband/sw/rxe/
rxe_srq.c
44 int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, in rxe_srq_from_init() argument
53 srq->ibsrq.event_handler = init->event_handler; in rxe_srq_from_init()
54 srq->ibsrq.srq_context = init->srq_context; in rxe_srq_from_init()
55 srq->limit = init->attr.srq_limit; in rxe_srq_from_init()
56 srq->srq_num = srq->elem.index; in rxe_srq_from_init()
57 srq->rq.max_wr = init->attr.max_wr; in rxe_srq_from_init()
58 srq->rq.max_sge = init->attr.max_sge; in rxe_srq_from_init()
60 srq_wqe_size = rcv_wqe_size(srq->rq.max_sge); in rxe_srq_from_init()
62 spin_lock_init(&srq->rq.producer_lock); in rxe_srq_from_init()
63 spin_lock_init(&srq->rq.consumer_lock); in rxe_srq_from_init()
[all …]
rxe_qp.c
80 if (rxe_qp_chk_cap(rxe, cap, !!init->srq)) in rxe_qp_chk_init()
267 if (!qp->srq) { in rxe_qp_init_resp()
315 struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL; in rxe_qp_from_init() local
320 if (srq) in rxe_qp_from_init()
321 rxe_get(srq); in rxe_qp_from_init()
326 qp->srq = srq; in rxe_qp_from_init()
356 qp->srq = NULL; in rxe_qp_from_init()
358 if (srq) in rxe_qp_from_init()
359 rxe_put(srq); in rxe_qp_from_init()
374 init->srq = qp->ibqp.srq; in rxe_qp_to_init()
[all …]
rxe_resp.c
295 struct rxe_srq *srq = qp->srq; in get_srq_wqe() local
296 struct rxe_queue *q = srq->rq.queue; in get_srq_wqe()
303 if (srq->error) in get_srq_wqe()
306 spin_lock_irqsave(&srq->rq.consumer_lock, flags); in get_srq_wqe()
310 spin_unlock_irqrestore(&srq->rq.consumer_lock, flags); in get_srq_wqe()
315 if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) { in get_srq_wqe()
316 spin_unlock_irqrestore(&srq->rq.consumer_lock, flags); in get_srq_wqe()
327 if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) { in get_srq_wqe()
328 srq->limit = 0; in get_srq_wqe()
332 spin_unlock_irqrestore(&srq->rq.consumer_lock, flags); in get_srq_wqe()
[all …]
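get_srq_wqe() implements the SRQ limit event: after a receive WQE is consumed, if the queue has been armed (srq->limit != 0) and the remaining count drops below the limit, the limit is cleared and the application's event handler fires once, telling it to repost buffers. A compact sketch of that one-shot behaviour; simulation only, with illustrative names:

#include <stdio.h>

struct srq_sim {
	unsigned int count;   /* receive buffers still queued */
	unsigned int limit;   /* 0 = disarmed */
	void (*event_handler)(const char *what);
};

static void consume_wqe(struct srq_sim *srq)
{
	srq->count--;
	if (srq->limit && srq->event_handler && srq->count < srq->limit) {
		srq->limit = 0;                       /* one-shot: disarm */
		srq->event_handler("SRQ_LIMIT_REACHED");
	}
}

static void on_limit(const char *what)
{
	printf("event: %s -- repost receive buffers now\n", what);
}

int main(void)
{
	struct srq_sim srq = { .count = 4, .limit = 3,
			       .event_handler = on_limit };

	while (srq.count)
		consume_wqe(&srq);   /* fires once, when count drops to 2 */
	return 0;
}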
rxe_verbs.c
289 struct rxe_srq *srq = to_rsrq(ibsrq); in rxe_create_srq() local
305 err = rxe_add_to_pool(&rxe->srq_pool, srq); in rxe_create_srq()
310 srq->pd = pd; in rxe_create_srq()
312 err = rxe_srq_from_init(rxe, srq, init, udata, uresp); in rxe_create_srq()
319 rxe_cleanup(srq); in rxe_create_srq()
329 struct rxe_srq *srq = to_rsrq(ibsrq); in rxe_modify_srq() local
342 err = rxe_srq_chk_attr(rxe, srq, attr, mask); in rxe_modify_srq()
346 err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata); in rxe_modify_srq()
354 struct rxe_srq *srq = to_rsrq(ibsrq); in rxe_query_srq() local
356 if (srq->error) in rxe_query_srq()
[all …]
/linux-6.1.9/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_srq.c
65 struct pvrdma_srq *srq = to_vsrq(ibsrq); in pvrdma_query_srq() local
74 cmd->srq_handle = srq->srq_handle; in pvrdma_query_srq()
102 struct pvrdma_srq *srq = to_vsrq(ibsrq); in pvrdma_create_srq() local
137 spin_lock_init(&srq->lock); in pvrdma_create_srq()
138 refcount_set(&srq->refcnt, 1); in pvrdma_create_srq()
139 init_completion(&srq->free); in pvrdma_create_srq()
149 srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0); in pvrdma_create_srq()
150 if (IS_ERR(srq->umem)) { in pvrdma_create_srq()
151 ret = PTR_ERR(srq->umem); in pvrdma_create_srq()
155 srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE); in pvrdma_create_srq()
[all …]
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx4/
srq.c
46 struct mlx4_srq *srq; in mlx4_srq_event() local
49 srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1)); in mlx4_srq_event()
51 if (srq) in mlx4_srq_event()
52 refcount_inc(&srq->refcount); in mlx4_srq_event()
58 srq->event(srq, event_type); in mlx4_srq_event()
60 if (refcount_dec_and_test(&srq->refcount)) in mlx4_srq_event()
61 complete(&srq->free); in mlx4_srq_event()
163 struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) in mlx4_srq_alloc() argument
171 err = mlx4_srq_alloc_icm(dev, &srq->srqn); in mlx4_srq_alloc()
176 err = radix_tree_insert(&srq_table->tree, srq->srqn, srq); in mlx4_srq_alloc()
[all …]
/linux-6.1.9/drivers/infiniband/hw/cxgb4/
t4.h
425 static inline u32 t4_srq_avail(struct t4_srq *srq) in t4_srq_avail() argument
427 return srq->size - 1 - srq->in_use; in t4_srq_avail()
430 static inline void t4_srq_produce(struct t4_srq *srq, u8 len16) in t4_srq_produce() argument
432 srq->in_use++; in t4_srq_produce()
433 if (++srq->pidx == srq->size) in t4_srq_produce()
434 srq->pidx = 0; in t4_srq_produce()
435 srq->wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE); in t4_srq_produce()
436 if (srq->wq_pidx >= srq->size * T4_RQ_NUM_SLOTS) in t4_srq_produce()
437 srq->wq_pidx %= srq->size * T4_RQ_NUM_SLOTS; in t4_srq_produce()
438 srq->queue[srq->size].status.host_pidx = srq->pidx; in t4_srq_produce()
[all …]
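t4_srq_avail()/t4_srq_produce() above are classic ring bookkeeping: available space is size - 1 - in_use (one slot held back), pidx wraps at size, and wq_pidx advances in 16-byte hardware units and wraps at size * T4_RQ_NUM_SLOTS. A userspace sketch; the constant values here are invented, not cxgb4's:

#include <stdio.h>

#define T4_EQ_ENTRY_SIZE 64   /* illustrative */
#define T4_RQ_NUM_SLOTS  5    /* illustrative */

struct srq_ring {
	unsigned int size, in_use, pidx, wq_pidx;
};

static unsigned int srq_avail(const struct srq_ring *srq)
{
	return srq->size - 1 - srq->in_use;   /* one slot kept back */
}

static void srq_produce(struct srq_ring *srq, unsigned char len16)
{
	srq->in_use++;
	if (++srq->pidx == srq->size)
		srq->pidx = 0;
	/* hardware pidx advances in T4_EQ_ENTRY_SIZE units (DIV_ROUND_UP) */
	srq->wq_pidx += (len16 * 16 + T4_EQ_ENTRY_SIZE - 1) / T4_EQ_ENTRY_SIZE;
	if (srq->wq_pidx >= srq->size * T4_RQ_NUM_SLOTS)
		srq->wq_pidx %= srq->size * T4_RQ_NUM_SLOTS;
}

int main(void)
{
	struct srq_ring srq = { .size = 8 };

	while (srq_avail(&srq))
		srq_produce(&srq, 4);   /* 4 * 16 = 64 bytes per WR */
	printf("in_use %u pidx %u wq_pidx %u\n",
	       srq.in_use, srq.pidx, srq.wq_pidx);
	return 0;
}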
cq.c
462 static void post_pending_srq_wrs(struct t4_srq *srq) in post_pending_srq_wrs() argument
467 while (srq->pending_in_use) { in post_pending_srq_wrs()
468 pwr = &srq->pending_wrs[srq->pending_cidx]; in post_pending_srq_wrs()
469 srq->sw_rq[srq->pidx].wr_id = pwr->wr_id; in post_pending_srq_wrs()
470 srq->sw_rq[srq->pidx].valid = 1; in post_pending_srq_wrs()
474 srq->cidx, srq->pidx, srq->wq_pidx, in post_pending_srq_wrs()
475 srq->in_use, srq->size, in post_pending_srq_wrs()
478 c4iw_copy_wr_to_srq(srq, &pwr->wqe, pwr->len16); in post_pending_srq_wrs()
479 t4_srq_consume_pending_wr(srq); in post_pending_srq_wrs()
480 t4_srq_produce(srq, pwr->len16); in post_pending_srq_wrs()
[all …]
qp.c
1341 static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe, in defer_srq_wr() argument
1344 struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx]; in defer_srq_wr()
1347 __func__, srq->cidx, srq->pidx, srq->wq_pidx, in defer_srq_wr()
1348 srq->in_use, srq->ooo_count, in defer_srq_wr()
1349 (unsigned long long)wr_id, srq->pending_cidx, in defer_srq_wr()
1350 srq->pending_pidx, srq->pending_in_use); in defer_srq_wr()
1354 t4_srq_produce_pending_wr(srq); in defer_srq_wr()
1361 struct c4iw_srq *srq; in c4iw_post_srq_recv() local
1368 srq = to_c4iw_srq(ibsrq); in c4iw_post_srq_recv()
1369 spin_lock_irqsave(&srq->lock, flag); in c4iw_post_srq_recv()
[all …]
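defer_srq_wr() parks a WR on a software pending ring when the SRQ cannot take it directly, and post_pending_srq_wrs() (in cq.c above) later replays the pending ring into the SRQ in FIFO order. A combined sketch of the two-ring flow, with illustrative types and sizes:

#include <stdio.h>

#define NPEND 8   /* illustrative pending-ring depth */

struct pending_wr { unsigned long long wr_id; };

struct srq_sw {
	struct pending_wr pending_wrs[NPEND];
	unsigned int pending_cidx, pending_pidx, pending_in_use;
};

static void defer_wr(struct srq_sw *srq, unsigned long long wr_id)
{
	srq->pending_wrs[srq->pending_pidx].wr_id = wr_id;
	srq->pending_pidx = (srq->pending_pidx + 1) % NPEND;
	srq->pending_in_use++;
}

static void post_pending_wrs(struct srq_sw *srq)
{
	while (srq->pending_in_use) {
		struct pending_wr *pwr = &srq->pending_wrs[srq->pending_cidx];

		printf("replaying wr_id %llu into the SRQ\n", pwr->wr_id);
		srq->pending_cidx = (srq->pending_cidx + 1) % NPEND;
		srq->pending_in_use--;
	}
}

int main(void)
{
	struct srq_sw srq = { 0 };

	defer_wr(&srq, 101);      /* SRQ busy: park the WR */
	defer_wr(&srq, 102);
	post_pending_wrs(&srq);   /* drained: replay in order */
	return 0;
}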
/linux-6.1.9/drivers/infiniband/sw/siw/
siw_verbs.c
348 if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) { in siw_create_qp()
402 if (attrs->srq) { in siw_create_qp()
408 qp->srq = to_siw_srq(attrs->srq); in siw_create_qp()
534 qp_init_attr->srq = base_qp->srq; in siw_query_qp()
1004 if (qp->srq || qp->attrs.rq_size == 0) { in siw_post_receive()
1594 struct siw_srq *srq = to_siw_srq(base_srq); in siw_create_srq() local
1615 srq->max_sge = attrs->max_sge; in siw_create_srq()
1616 srq->num_rqe = roundup_pow_of_two(attrs->max_wr); in siw_create_srq()
1617 srq->limit = attrs->srq_limit; in siw_create_srq()
1618 if (srq->limit) in siw_create_srq()
[all …]
siw_qp_rx.c
333 struct siw_srq *srq; in siw_rqe_get() local
338 srq = qp->srq; in siw_rqe_get()
339 if (srq) { in siw_rqe_get()
340 spin_lock_irqsave(&srq->lock, flags); in siw_rqe_get()
341 if (unlikely(!srq->num_rqe)) in siw_rqe_get()
344 rqe = &srq->recvq[srq->rq_get % srq->num_rqe]; in siw_rqe_get()
378 if (srq) in siw_rqe_get()
379 spin_unlock_irqrestore(&srq->lock, flags); in siw_rqe_get()
382 if (!srq) { in siw_rqe_get()
385 if (srq->armed) { in siw_rqe_get()
[all …]
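siw_rqe_get() indexes the receive queue as recvq[rq_get % num_rqe]; because siw_create_srq() rounded num_rqe up to a power of two, the free-running counter never needs an explicit wrap and the modulo compiles to a cheap mask. A tiny demonstration with an assumed queue depth:

#include <stdio.h>

#define NUM_RQE 8   /* power of two, as from roundup_pow_of_two(max_wr) */

int main(void)
{
	int recvq[NUM_RQE];
	unsigned int rq_get;

	/* the counter just keeps climbing; the modulo does the wrapping */
	for (rq_get = 0; rq_get < 20; rq_get++)
		recvq[rq_get % NUM_RQE] = (int)rq_get;

	printf("recvq[3] = %d\n", recvq[3]);   /* last written at 19 % 8 = 3 */
	return 0;
}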
/linux-6.1.9/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
82 if (!qp->srq) { in __bnxt_qplib_add_flush_qp()
129 if (!qp->srq) { in __bnxt_qplib_del_flush_qp()
354 struct bnxt_qplib_srq *srq; in bnxt_qplib_service_nq() local
361 srq = (struct bnxt_qplib_srq *)q_handle; in bnxt_qplib_service_nq()
362 bnxt_qplib_armen_db(&srq->dbinfo, in bnxt_qplib_service_nq()
581 struct bnxt_qplib_srq *srq) in bnxt_qplib_destroy_srq() argument
592 req.srq_cid = cpu_to_le32(srq->id); in bnxt_qplib_destroy_srq()
596 kfree(srq->swq); in bnxt_qplib_destroy_srq()
599 bnxt_qplib_free_hwq(res, &srq->hwq); in bnxt_qplib_destroy_srq()
603 struct bnxt_qplib_srq *srq) in bnxt_qplib_create_srq() argument
[all …]
/linux-6.1.9/drivers/infiniband/core/
uverbs_std_types_srq.c
14 struct ib_srq *srq = uobject->object; in uverbs_free_srq() local
17 enum ib_srq_type srq_type = srq->srq_type; in uverbs_free_srq()
20 ret = ib_destroy_srq_user(srq, &attrs->driver_udata); in uverbs_free_srq()
46 struct ib_srq *srq; in UVERBS_HANDLER() local
107 srq = ib_create_srq_user(pd, &attr, obj, &attrs->driver_udata); in UVERBS_HANDLER()
108 if (IS_ERR(srq)) { in UVERBS_HANDLER()
109 ret = PTR_ERR(srq); in UVERBS_HANDLER()
113 obj->uevent.uobject.object = srq; in UVERBS_HANDLER()
131 &srq->ext.xrc.srq_num, in UVERBS_HANDLER()
132 sizeof(srq->ext.xrc.srq_num)); in UVERBS_HANDLER()
verbs.c
1010 struct ib_srq *srq; in ib_create_srq_user() local
1013 srq = rdma_zalloc_drv_obj(pd->device, ib_srq); in ib_create_srq_user()
1014 if (!srq) in ib_create_srq_user()
1017 srq->device = pd->device; in ib_create_srq_user()
1018 srq->pd = pd; in ib_create_srq_user()
1019 srq->event_handler = srq_init_attr->event_handler; in ib_create_srq_user()
1020 srq->srq_context = srq_init_attr->srq_context; in ib_create_srq_user()
1021 srq->srq_type = srq_init_attr->srq_type; in ib_create_srq_user()
1022 srq->uobject = uobject; in ib_create_srq_user()
1024 if (ib_srq_has_cq(srq->srq_type)) { in ib_create_srq_user()
[all …]
uverbs_std_types_qp.c
93 struct ib_srq *srq = NULL; in UVERBS_HANDLER() local
217 srq = uverbs_attr_get_obj(attrs, in UVERBS_HANDLER()
219 if (!IS_ERR(srq)) { in UVERBS_HANDLER()
220 if ((srq->srq_type == IB_SRQT_XRC && in UVERBS_HANDLER()
222 (srq->srq_type != IB_SRQT_XRC && in UVERBS_HANDLER()
225 attr.srq = srq; in UVERBS_HANDLER()
/linux-6.1.9/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
1118 if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) { in ocrdma_check_qp_params()
1173 int dpp_credit_lmt, int srq) in ocrdma_copy_qp_uresp() argument
1190 if (!srq) { in ocrdma_copy_qp_uresp()
1217 if (!srq) { in ocrdma_copy_qp_uresp()
1342 (attrs->srq != NULL)); in ocrdma_create_qp()
1543 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx) in ocrdma_srq_toggle_bit() argument
1548 srq->idx_bit_fields[i] ^= mask; in ocrdma_srq_toggle_bit()
1610 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp))) in ocrdma_discard_cqes()
1627 if (qp->srq) { in ocrdma_discard_cqes()
1630 qp->srq->rq.max_wqe_idx; in ocrdma_discard_cqes()
[all …]
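ocrdma_srq_toggle_bit() flips one bit in an array of index words with XOR, tracking which SRQ slots are currently posted. A sketch with an assumed 32-bit word width and an invented field name:

#include <stdio.h>
#include <stdint.h>

static void srq_toggle_bit(uint32_t *idx_bit_fields, unsigned int idx)
{
	unsigned int i = idx / 32;
	uint32_t mask = 1u << (idx % 32);

	idx_bit_fields[i] ^= mask;   /* posted <-> free */
}

int main(void)
{
	uint32_t bits[4] = { 0 };

	srq_toggle_bit(bits, 37);                     /* mark slot 37 posted */
	printf("word1 = %#x\n", (unsigned)bits[1]);   /* 0x20 */
	srq_toggle_bit(bits, 37);                     /* and free it again */
	printf("word1 = %#x\n", (unsigned)bits[1]);   /* 0 */
	return 0;
}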
/linux-6.1.9/drivers/net/
eql.c
264 static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
265 static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);
416 slaving_request_t srq; in eql_enslave() local
418 if (copy_from_user(&srq, srqp, sizeof (slaving_request_t))) in eql_enslave()
421 slave_dev = __dev_get_by_name(&init_net, srq.slave_name); in eql_enslave()
436 s->priority = srq.priority; in eql_enslave()
437 s->priority_bps = srq.priority; in eql_enslave()
438 s->priority_Bps = srq.priority / 8; in eql_enslave()
458 slaving_request_t srq; in eql_emancipate() local
461 if (copy_from_user(&srq, srqp, sizeof (slaving_request_t))) in eql_emancipate()
[all …]
/linux-6.1.9/drivers/infiniband/hw/qedr/
verbs.c
102 struct qedr_srq *srq = get_qedr_srq(ibsrq); in qedr_query_srq() local
104 srq_attr->srq_limit = srq->srq_limit; in qedr_query_srq()
1256 struct qedr_srq *srq, struct ib_udata *udata) in qedr_copy_srq_uresp() argument
1261 uresp.srq_id = srq->srq_id; in qedr_copy_srq_uresp()
1372 if (attrs->srq) in qedr_set_common_qp_params()
1373 qp->srq = get_qedr_srq(attrs->srq); in qedr_set_common_qp_params()
1387 qp->state, qp->signaled, (attrs->srq) ? 1 : 0); in qedr_set_common_qp_params()
1447 static void qedr_free_srq_user_params(struct qedr_srq *srq) in qedr_free_srq_user_params() argument
1449 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl); in qedr_free_srq_user_params()
1450 ib_umem_release(srq->usrq.umem); in qedr_free_srq_user_params()
[all …]
