
Searched refs:wqe (Results 1 – 25 of 135) sorted by relevance


/linux-6.6.21/drivers/infiniband/sw/rxe/
rxe_req.c
14 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
18 struct rxe_send_wqe *wqe, int npsn) in retry_first_write_send() argument
23 int to_send = (wqe->dma.resid > qp->mtu) ? in retry_first_write_send()
24 qp->mtu : wqe->dma.resid; in retry_first_write_send()
26 qp->req.opcode = next_opcode(qp, wqe, in retry_first_write_send()
27 wqe->wr.opcode); in retry_first_write_send()
29 if (wqe->wr.send_flags & IB_SEND_INLINE) { in retry_first_write_send()
30 wqe->dma.resid -= to_send; in retry_first_write_send()
31 wqe->dma.sge_offset += to_send; in retry_first_write_send()
33 advance_dma_data(&wqe->dma, to_send); in retry_first_write_send()
[all …]
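
A note on the retry_first_write_send() excerpt above: the requester caps each packet's payload at the path MTU and then advances the WQE's DMA cursor by that amount. Below is a minimal userspace sketch of that bookkeeping; the struct fields and helper are simplified stand-ins, not the kernel's rxe definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the per-WQE DMA bookkeeping (assumed fields). */
    struct dma_state {
        uint32_t resid;       /* bytes of payload still to send */
        uint32_t sge_offset;  /* offset into the current scatter/gather element */
    };

    /* Consume at most one MTU of payload, as the snippet does per packet. */
    static void advance_one_packet(struct dma_state *dma, uint32_t mtu)
    {
        uint32_t to_send = dma->resid > mtu ? mtu : dma->resid;

        dma->resid      -= to_send;
        dma->sge_offset += to_send;
    }

    int main(void)
    {
        struct dma_state dma = { .resid = 10000, .sge_offset = 0 };
        int packets = 0;

        while (dma.resid) {
            advance_one_packet(&dma, 4096);
            packets++;
        }
        printf("%d packets, final offset %u\n", packets, dma.sge_offset); /* 3, 10000 */
        return 0;
    }
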
rxe_comp.c
150 struct rxe_send_wqe *wqe; in get_wqe() local
155 wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT); in get_wqe()
156 *wqe_p = wqe; in get_wqe()
159 if (!wqe || wqe->state == wqe_state_posted) in get_wqe()
163 if (wqe->state == wqe_state_done) in get_wqe()
167 if (wqe->state == wqe_state_error) in get_wqe()
183 struct rxe_send_wqe *wqe) in check_psn() argument
190 diff = psn_compare(pkt->psn, wqe->last_psn); in check_psn()
192 if (wqe->state == wqe_state_pending) { in check_psn()
193 if (wqe->mask & WR_ATOMIC_OR_READ_MASK) in check_psn()
[all …]
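
The get_wqe() lines above show the completer peeking at the send-queue head and branching on the WQE's state. A hedged sketch of that dispatch follows; the state names and return values are illustrative, not the kernel's wqe_state_*/COMPST_* enums.

    #include <stddef.h>

    /* Illustrative WQE lifecycle states (assumed; loosely mirrors the snippet). */
    enum wqe_state { STATE_POSTED, STATE_PENDING, STATE_DONE, STATE_ERROR };

    struct send_wqe { enum wqe_state state; };

    enum next_action { ACT_EXIT, ACT_COMPLETE, ACT_ERROR, ACT_WAIT_ACK };

    /* Nothing to do if the queue is empty or the head was only just posted;
     * a finished or failed WQE is completed or errored; otherwise keep
     * waiting for acknowledgements. */
    static enum next_action dispatch_head(const struct send_wqe *wqe)
    {
        if (!wqe || wqe->state == STATE_POSTED)
            return ACT_EXIT;
        if (wqe->state == STATE_DONE)
            return ACT_COMPLETE;
        if (wqe->state == STATE_ERROR)
            return ACT_ERROR;
        return ACT_WAIT_ACK;
    }

    int main(void)
    {
        struct send_wqe head = { .state = STATE_DONE };
        return dispatch_head(&head) == ACT_COMPLETE ? 0 : 1;
    }
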
rxe_mw.c
50 static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, in rxe_check_bind_mw() argument
83 if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) { in rxe_check_bind_mw()
117 if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) { in rxe_check_bind_mw()
123 if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) || in rxe_check_bind_mw()
124 ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) > in rxe_check_bind_mw()
135 static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, in rxe_do_bind_mw() argument
138 u32 key = wqe->wr.wr.mw.rkey & 0xff; in rxe_do_bind_mw()
143 mw->addr = wqe->wr.wr.mw.addr; in rxe_do_bind_mw()
144 mw->length = wqe->wr.wr.mw.length; in rxe_do_bind_mw()
164 int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) in rxe_bind_mw() argument
[all …]
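
The rxe_check_bind_mw() excerpt is essentially a range-containment check: a memory window may only be bound to an address range that lies entirely within its parent memory region. A self-contained sketch of that check, with simplified field names (assumptions, not the kernel structs):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct region { uint64_t iova; uint64_t length; };  /* parent MR (simplified) */
    struct window { uint64_t addr; uint64_t length; };  /* MW to bind (simplified) */

    /* Mirrors the visible checks: non-zero length, window no larger than the
     * region, and [addr, addr + length) fully contained in the region. */
    static bool window_fits_region(const struct window *mw, const struct region *mr)
    {
        if (mw->length == 0 || mw->length > mr->length)
            return false;
        if (mw->addr < mr->iova || mw->addr + mw->length > mr->iova + mr->length)
            return false;
        return true;
    }

    int main(void)
    {
        struct region mr  = { .iova = 0x1000, .length = 0x4000 };
        struct window ok  = { .addr = 0x2000, .length = 0x1000 };
        struct window bad = { .addr = 0x4800, .length = 0x1000 };

        printf("%d %d\n", window_fits_region(&ok, &mr),
               window_fits_region(&bad, &mr));   /* prints: 1 0 */
        return 0;
    }
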
/linux-6.6.21/drivers/infiniband/sw/rdmavt/
trace_tx.h
49 TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, int wr_num_sge),
50 TP_ARGS(qp, wqe, wr_num_sge),
54 __field(struct rvt_swqe *, wqe)
73 __entry->wqe = wqe;
74 __entry->wr_id = wqe->wr.wr_id;
77 __entry->psn = wqe->psn;
78 __entry->lpsn = wqe->lpsn;
79 __entry->length = wqe->length;
80 __entry->opcode = wqe->wr.opcode;
86 __entry->ssn = wqe->ssn;
[all …]
qp.c
591 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last); in rvt_clear_mr_refs() local
593 rvt_put_qp_swqe(qp, wqe); in rvt_clear_mr_refs()
621 static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey) in rvt_swqe_has_lkey() argument
625 for (i = 0; i < wqe->wr.num_sge; i++) { in rvt_swqe_has_lkey()
626 struct rvt_sge *sge = &wqe->sg_list[i]; in rvt_swqe_has_lkey()
644 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last); in rvt_qp_sends_has_lkey() local
646 if (rvt_swqe_has_lkey(wqe, lkey)) in rvt_qp_sends_has_lkey()
979 struct rvt_swqe *wqe; in free_ud_wq_attr() local
983 wqe = rvt_get_swqe_ptr(qp, i); in free_ud_wq_attr()
984 kfree(wqe->ud_wr.attr); in free_ud_wq_attr()
[all …]
/linux-6.6.21/drivers/infiniband/sw/siw/
siw_qp_tx.c
42 struct siw_wqe *wqe = &c_tx->wqe_active; in siw_try_1seg() local
43 struct siw_sge *sge = &wqe->sqe.sge[0]; in siw_try_1seg()
46 if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1) in siw_try_1seg()
52 if (tx_flags(wqe) & SIW_WQE_INLINE) { in siw_try_1seg()
53 memcpy(paddr, &wqe->sqe.sge[1], bytes); in siw_try_1seg()
55 struct siw_mem *mem = wqe->mem[0]; in siw_try_1seg()
120 struct siw_wqe *wqe = &c_tx->wqe_active; in siw_qp_prepare_tx() local
124 switch (tx_type(wqe)) { in siw_qp_prepare_tx()
136 c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey); in siw_qp_prepare_tx()
138 cpu_to_be64(wqe->sqe.sge[0].laddr); in siw_qp_prepare_tx()
[all …]
siw_qp.c
265 struct siw_wqe *wqe = tx_wqe(qp); in siw_qp_mpa_rts() local
271 if (unlikely(wqe->wr_status != SIW_WR_IDLE)) { in siw_qp_mpa_rts()
275 memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE); in siw_qp_mpa_rts()
277 wqe->wr_status = SIW_WR_QUEUED; in siw_qp_mpa_rts()
278 wqe->sqe.flags = 0; in siw_qp_mpa_rts()
279 wqe->sqe.num_sge = 1; in siw_qp_mpa_rts()
280 wqe->sqe.sge[0].length = 0; in siw_qp_mpa_rts()
281 wqe->sqe.sge[0].laddr = 0; in siw_qp_mpa_rts()
282 wqe->sqe.sge[0].lkey = 0; in siw_qp_mpa_rts()
287 wqe->sqe.rkey = 1; in siw_qp_mpa_rts()
[all …]
siw_qp_rx.c
169 struct siw_wqe *wqe = &frx->wqe_active; in siw_rresp_check_ntoh() local
176 srx->ddp_stag = wqe->sqe.sge[0].lkey; in siw_rresp_check_ntoh()
177 srx->ddp_to = wqe->sqe.sge[0].laddr; in siw_rresp_check_ntoh()
204 (wqe->processed + srx->fpdu_part_rem != wqe->bytes))) { in siw_rresp_check_ntoh()
207 wqe->processed + srx->fpdu_part_rem, wqe->bytes); in siw_rresp_check_ntoh()
281 struct siw_wqe *wqe = &frx->wqe_active; in siw_send_check_ntoh() local
301 if (unlikely(ddp_mo != wqe->processed)) { in siw_send_check_ntoh()
303 qp_id(rx_qp(srx)), ddp_mo, wqe->processed); in siw_send_check_ntoh()
316 if (unlikely(wqe->bytes < wqe->processed + srx->fpdu_part_rem)) { in siw_send_check_ntoh()
318 wqe->bytes, wqe->processed, srx->fpdu_part_rem); in siw_send_check_ntoh()
[all …]
/linux-6.6.21/drivers/infiniband/hw/irdma/
uda.c
23 __le64 *wqe; in irdma_sc_access_ah() local
26 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); in irdma_sc_access_ah()
27 if (!wqe) in irdma_sc_access_ah()
30 set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16); in irdma_sc_access_ah()
41 set_64bit_val(wqe, 40, in irdma_sc_access_ah()
44 set_64bit_val(wqe, 32, in irdma_sc_access_ah()
48 set_64bit_val(wqe, 56, in irdma_sc_access_ah()
51 set_64bit_val(wqe, 48, in irdma_sc_access_ah()
55 set_64bit_val(wqe, 32, in irdma_sc_access_ah()
58 set_64bit_val(wqe, 48, in irdma_sc_access_ah()
[all …]
uk.c
15 static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge, in irdma_set_fragment() argument
19 set_64bit_val(wqe, offset, in irdma_set_fragment()
21 set_64bit_val(wqe, offset + 8, in irdma_set_fragment()
26 set_64bit_val(wqe, offset, 0); in irdma_set_fragment()
27 set_64bit_val(wqe, offset + 8, in irdma_set_fragment()
39 static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset, in irdma_set_fragment_gen_1() argument
43 set_64bit_val(wqe, offset, in irdma_set_fragment_gen_1()
45 set_64bit_val(wqe, offset + 8, in irdma_set_fragment_gen_1()
49 set_64bit_val(wqe, offset, 0); in irdma_set_fragment_gen_1()
50 set_64bit_val(wqe, offset + 8, 0); in irdma_set_fragment_gen_1()
[all …]
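
In the irdma snippets the WQE is just a buffer of little-endian 64-bit words, and helpers such as irdma_set_fragment() store one scatter/gather entry as two words at a fixed byte offset (zeros when no SGE is present). A minimal sketch of that pattern follows; the offsets, the packing of the second word, and the helper itself are simplified assumptions, not the hardware layout.

    #include <stdint.h>
    #include <stdio.h>

    /* Store a 64-bit value little-endian at a byte offset in the WQE buffer,
     * in the spirit of the driver's set_64bit_val() helper (simplified). */
    static void set_64bit_val(uint8_t *wqe, uint32_t offset, uint64_t val)
    {
        for (int i = 0; i < 8; i++)
            wqe[offset + i] = (uint8_t)(val >> (8 * i));
    }

    struct sge { uint64_t addr; uint32_t length; uint32_t lkey; };

    /* One fragment: address in the first word, key and length packed into the
     * second; a missing SGE becomes zeros, like the snippet's !sge branch. */
    static void set_fragment(uint8_t *wqe, uint32_t offset, const struct sge *sge)
    {
        if (sge) {
            set_64bit_val(wqe, offset, sge->addr);
            set_64bit_val(wqe, offset + 8, ((uint64_t)sge->lkey << 32) | sge->length);
        } else {
            set_64bit_val(wqe, offset, 0);
            set_64bit_val(wqe, offset + 8, 0);
        }
    }

    int main(void)
    {
        uint8_t wqe[64] = { 0 };
        struct sge sge = { .addr = 0xdeadbeef, .length = 512, .lkey = 0x42 };

        set_fragment(wqe, 32, &sge);
        printf("first payload byte: 0x%02x\n", wqe[32]); /* 0xef */
        return 0;
    }
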
ctrl.c
186 __le64 *wqe; in irdma_sc_add_arp_cache_entry() local
189 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); in irdma_sc_add_arp_cache_entry()
190 if (!wqe) in irdma_sc_add_arp_cache_entry()
192 set_64bit_val(wqe, 8, info->reach_max); in irdma_sc_add_arp_cache_entry()
193 set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr)); in irdma_sc_add_arp_cache_entry()
202 set_64bit_val(wqe, 24, hdr); in irdma_sc_add_arp_cache_entry()
205 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false); in irdma_sc_add_arp_cache_entry()
222 __le64 *wqe; in irdma_sc_del_arp_cache_entry() local
225 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); in irdma_sc_del_arp_cache_entry()
226 if (!wqe) in irdma_sc_del_arp_cache_entry()
[all …]
/linux-6.6.21/drivers/infiniband/hw/hfi1/
rc.c
394 struct rvt_swqe *wqe; in hfi1_make_rc_req() local
449 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in hfi1_make_rc_req()
450 hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ? in hfi1_make_rc_req()
469 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); in hfi1_make_rc_req()
495 if ((wqe->wr.send_flags & IB_SEND_FENCE) && in hfi1_make_rc_req()
497 (wqe->wr.opcode != IB_WR_TID_RDMA_READ || in hfi1_make_rc_req()
506 if (wqe->wr.opcode == IB_WR_REG_MR || in hfi1_make_rc_req()
507 wqe->wr.opcode == IB_WR_LOCAL_INV) { in hfi1_make_rc_req()
517 if (!(wqe->wr.send_flags & in hfi1_make_rc_req()
521 wqe->wr.ex.invalidate_rkey); in hfi1_make_rc_req()
[all …]
uc.c
26 struct rvt_swqe *wqe; in hfi1_make_uc_req() local
49 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in hfi1_make_uc_req()
50 rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in hfi1_make_uc_req()
72 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); in hfi1_make_uc_req()
88 if (wqe->wr.opcode == IB_WR_REG_MR || in hfi1_make_uc_req()
89 wqe->wr.opcode == IB_WR_LOCAL_INV) { in hfi1_make_uc_req()
97 if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) { in hfi1_make_uc_req()
99 qp, wqe->wr.ex.invalidate_rkey); in hfi1_make_uc_req()
102 rvt_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR in hfi1_make_uc_req()
111 qp->s_psn = wqe->psn; in hfi1_make_uc_req()
[all …]
tid_rdma.h
214 void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
221 static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe) in trdma_clean_swqe() argument
223 if (!wqe->priv) in trdma_clean_swqe()
225 __trdma_clean_swqe(qp, wqe); in trdma_clean_swqe()
244 u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
247 u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
258 void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
261 bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);
263 void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
265 struct rvt_swqe *wqe) in hfi1_setup_tid_rdma_wqe() argument
[all …]
tid_rdma.c
378 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i); in hfi1_qp_priv_init() local
386 priv->tid_req.e.swqe = wqe; in hfi1_qp_priv_init()
387 wqe->priv = priv; in hfi1_qp_priv_init()
416 struct rvt_swqe *wqe; in hfi1_qp_priv_tid_free() local
421 wqe = rvt_get_swqe_ptr(qp, i); in hfi1_qp_priv_tid_free()
422 kfree(wqe->priv); in hfi1_qp_priv_tid_free()
423 wqe->priv = NULL; in hfi1_qp_priv_tid_free()
1621 void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe) in __trdma_clean_swqe() argument
1623 struct hfi1_swqe_priv *p = wqe->priv; in __trdma_clean_swqe()
1703 u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe, in hfi1_build_tid_rdma_read_packet() argument
[all …]
/linux-6.6.21/drivers/infiniband/hw/qib/
qib_rc.c
42 static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, in restart_sge() argument
47 len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu; in restart_sge()
48 return rvt_restart_sge(ss, wqe, len); in restart_sge()
222 struct rvt_swqe *wqe; in qib_make_rc_req() local
252 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in qib_make_rc_req()
253 rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ? in qib_make_rc_req()
276 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); in qib_make_rc_req()
297 if ((wqe->wr.send_flags & IB_SEND_FENCE) && in qib_make_rc_req()
303 qp->s_psn = wqe->psn; in qib_make_rc_req()
310 len = wqe->length; in qib_make_rc_req()
[all …]
qib_uc.c
53 struct rvt_swqe *wqe; in qib_make_uc_req() local
71 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in qib_make_uc_req()
72 rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in qib_make_uc_req()
85 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); in qib_make_uc_req()
98 qp->s_psn = wqe->psn; in qib_make_uc_req()
99 qp->s_sge.sge = wqe->sg_list[0]; in qib_make_uc_req()
100 qp->s_sge.sg_list = wqe->sg_list + 1; in qib_make_uc_req()
101 qp->s_sge.num_sge = wqe->wr.num_sge; in qib_make_uc_req()
102 qp->s_sge.total_len = wqe->length; in qib_make_uc_req()
103 len = wqe->length; in qib_make_uc_req()
[all …]
qib_ud.c
238 struct rvt_swqe *wqe; in qib_make_ud_req() local
258 wqe = rvt_get_swqe_ptr(qp, qp->s_last); in qib_make_ud_req()
259 rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in qib_make_ud_req()
267 wqe = rvt_get_swqe_ptr(qp, qp->s_cur); in qib_make_ud_req()
275 ah_attr = rvt_get_swqe_ah_attr(wqe); in qib_make_ud_req()
300 qib_ud_loopback(qp, wqe); in qib_make_ud_req()
303 rvt_send_complete(qp, wqe, IB_WC_SUCCESS); in qib_make_ud_req()
309 extra_bytes = -wqe->length & 3; in qib_make_ud_req()
310 nwords = (wqe->length + extra_bytes) >> 2; in qib_make_ud_req()
314 qp->s_cur_size = wqe->length; in qib_make_ud_req()
[all …]
/linux-6.6.21/drivers/infiniband/hw/mlx5/
umr.c
228 struct mlx5r_umr_wqe *wqe, bool with_data) in mlx5r_umr_post_send() argument
259 mlx5r_memcpy_send_wqe(&qp->sq, &cur_edge, &seg, &size, wqe, wqe_size); in mlx5r_umr_post_send()
289 struct mlx5r_umr_wqe *wqe, bool with_data) in mlx5r_umr_post_send_wait() argument
295 err = umr_check_mkey_mask(dev, be64_to_cpu(wqe->ctrl_seg.mkey_mask)); in mlx5r_umr_post_send_wait()
316 err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe, in mlx5r_umr_post_send_wait()
361 struct mlx5r_umr_wqe wqe = {}; in mlx5r_umr_revoke_mr() local
366 wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask(); in mlx5r_umr_revoke_mr()
367 wqe.ctrl_seg.mkey_mask |= get_umr_disable_mr_mask(); in mlx5r_umr_revoke_mr()
368 wqe.ctrl_seg.flags |= MLX5_UMR_INLINE; in mlx5r_umr_revoke_mr()
370 MLX5_SET(mkc, &wqe.mkey_seg, free, 1); in mlx5r_umr_revoke_mr()
[all …]
/linux-6.6.21/drivers/infiniband/hw/cxgb4/
qp.c
489 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, in build_rdma_send() argument
501 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
504 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
506 wqe->send.stag_inv = 0; in build_rdma_send()
510 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
513 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
515 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); in build_rdma_send()
521 wqe->send.r3 = 0; in build_rdma_send()
522 wqe->send.r4 = 0; in build_rdma_send()
527 ret = build_immd(sq, wqe->send.u.immd_src, wr, in build_rdma_send()
[all …]
/linux-6.6.21/drivers/scsi/lpfc/
lpfc_nvmet.c
80 union lpfc_wqe128 *wqe; in lpfc_nvmet_cmd_template() local
83 wqe = &lpfc_tsend_cmd_template; in lpfc_nvmet_cmd_template()
84 memset(wqe, 0, sizeof(union lpfc_wqe128)); in lpfc_nvmet_cmd_template()
97 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE); in lpfc_nvmet_cmd_template()
98 bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF); in lpfc_nvmet_cmd_template()
99 bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3); in lpfc_nvmet_cmd_template()
100 bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI); in lpfc_nvmet_cmd_template()
101 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1); in lpfc_nvmet_cmd_template()
108 bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG); in lpfc_nvmet_cmd_template()
109 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1); in lpfc_nvmet_cmd_template()
[all …]
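
The lpfc template code above clears a 128-byte WQE once and then programs individual bitfields with bf_set(). That macro reduces to a masked shift into one 32-bit word of the WQE; the generic version below illustrates the idea (the shift/mask values and word index are made up for the example, not the SLI4 layout).

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Insert 'value' into the field described by (shift, mask) within *word,
     * analogous in spirit to the SLI4 bf_set() macro. */
    static inline void bf_set_word(uint32_t *word, unsigned int shift,
                                   uint32_t mask, uint32_t value)
    {
        *word = (*word & ~(mask << shift)) | ((value & mask) << shift);
    }

    int main(void)
    {
        uint32_t wqe[32];                     /* a 128-byte WQE as 32-bit words */

        memset(wqe, 0, sizeof(wqe));
        bf_set_word(&wqe[7], 0, 0xff, 0x1d);  /* hypothetical 8-bit command field */
        bf_set_word(&wqe[7], 24, 0x7, 0x3);   /* hypothetical 3-bit class field  */
        printf("word 7 = 0x%08x\n", wqe[7]);  /* 0x0300001d */
        return 0;
    }
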
lpfc_nvme.c
399 union lpfc_wqe128 *wqe; in lpfc_nvme_gen_req() local
410 wqe = &genwqe->wqe; in lpfc_nvme_gen_req()
412 memset(wqe, 0, sizeof(union lpfc_wqe)); in lpfc_nvme_gen_req()
450 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; in lpfc_nvme_gen_req()
451 wqe->generic.bde.tus.f.bdeSize = first_len; in lpfc_nvme_gen_req()
452 wqe->generic.bde.addrLow = bpl[0].addrLow; in lpfc_nvme_gen_req()
453 wqe->generic.bde.addrHigh = bpl[0].addrHigh; in lpfc_nvme_gen_req()
456 wqe->gen_req.request_payload_len = first_len; in lpfc_nvme_gen_req()
461 bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0); in lpfc_nvme_gen_req()
462 bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1); in lpfc_nvme_gen_req()
[all …]
/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ktls_txrx.c
74 mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, in mlx5e_ktls_build_static_params() argument
80 struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; in mlx5e_ktls_build_static_params()
81 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; in mlx5e_ktls_build_static_params()
86 #define STATIC_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS) in mlx5e_ktls_build_static_params()
97 fill_static_params(&wqe->params, crypto_info, key_id, resync_tcp_sn); in mlx5e_ktls_build_static_params()
117 mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe, in mlx5e_ktls_build_progress_params() argument
123 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; in mlx5e_ktls_build_progress_params()
128 #define PROGRESS_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS) in mlx5e_ktls_build_progress_params()
136 fill_progress_params(&wqe->params, tis_tir_num, next_record_tcp_sn); in mlx5e_ktls_build_progress_params()
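
The *_DS_CNT macros above size each WQE in data-segment units: mlx5 send WQEs are measured in 16-byte chunks (MLX5_SEND_WQE_DS), and the control segment advertises how many such chunks the WQE occupies. A small sketch of that rounding; the byte sizes used below are illustrative, not the real structure sizes.

    #include <stdio.h>

    #define SEND_WQE_DS 16                    /* one data-segment unit, 16 bytes */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int wqe_size = 16 + 48 + 64; /* hypothetical ctrl + umr ctrl + params */

        printf("ds_cnt = %u\n", DIV_ROUND_UP(wqe_size, SEND_WQE_DS)); /* 8 */
        return 0;
    }
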
/linux-6.6.21/drivers/infiniband/hw/bnxt_re/
ib_verbs.c
402 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; in bnxt_re_create_fence_wqe() local
404 memset(wqe, 0, sizeof(*wqe)); in bnxt_re_create_fence_wqe()
405 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; in bnxt_re_create_fence_wqe()
406 wqe->wr_id = BNXT_QPLIB_FENCE_WRID; in bnxt_re_create_fence_wqe()
407 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; in bnxt_re_create_fence_wqe()
408 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; in bnxt_re_create_fence_wqe()
409 wqe->bind.zero_based = false; in bnxt_re_create_fence_wqe()
410 wqe->bind.parent_l_key = ib_mr->lkey; in bnxt_re_create_fence_wqe()
411 wqe->bind.va = (u64)(unsigned long)fence->va; in bnxt_re_create_fence_wqe()
412 wqe->bind.length = fence->size; in bnxt_re_create_fence_wqe()
[all …]
/linux-6.6.21/drivers/infiniband/hw/mthca/
mthca_srq.c
92 static inline int *wqe_to_link(void *wqe) in wqe_to_link() argument
94 return (int *) (wqe + offsetof(struct mthca_next_seg, imm)); in wqe_to_link()
158 void *wqe; in mthca_alloc_srq_buf() local
185 next = wqe = get_wqe(srq, i); in mthca_alloc_srq_buf()
188 *wqe_to_link(wqe) = i + 1; in mthca_alloc_srq_buf()
191 *wqe_to_link(wqe) = -1; in mthca_alloc_srq_buf()
195 for (scatter = wqe + sizeof (struct mthca_next_seg); in mthca_alloc_srq_buf()
196 (void *) scatter < wqe + (1 << srq->wqe_shift); in mthca_alloc_srq_buf()
495 void *wqe; in mthca_tavor_post_srq_recv() local
504 wqe = get_wqe(srq, ind); in mthca_tavor_post_srq_recv()
[all …]
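
mthca threads its SRQ free list through the WQEs themselves: wqe_to_link() points at an otherwise unused field inside each receive WQE, and that field stores the index of the next free WQE, with -1 terminating the chain. A self-contained sketch of an in-buffer free list like that (buffer layout and the link offset are assumptions):

    #include <stdalign.h>
    #include <stdio.h>

    #define NUM_WQE  8
    #define WQE_SIZE 64
    #define LINK_OFF 8                  /* assumed offset of the spare link field */

    static alignas(int) unsigned char buf[NUM_WQE * WQE_SIZE];

    static void *get_wqe(int i)       { return buf + i * WQE_SIZE; }
    static int  *wqe_to_link(void *w) { return (int *)((unsigned char *)w + LINK_OFF); }

    int main(void)
    {
        /* Chain each WQE to the next; terminate the list with -1. */
        for (int i = 0; i < NUM_WQE; i++)
            *wqe_to_link(get_wqe(i)) = (i < NUM_WQE - 1) ? i + 1 : -1;

        /* Walk the free list starting at index 0. */
        for (int i = 0; i != -1; i = *wqe_to_link(get_wqe(i)))
            printf("free wqe %d\n", i);
        return 0;
    }
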
