
Searched for refs:ibqp in linux-6.6.21 (results 1 – 25 of 128), sorted by relevance

/linux-6.6.21/drivers/infiniband/hw/mlx5/
wr.h
104 int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
106 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
109 static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp, in mlx5_ib_post_send_nodrain() argument
113 return mlx5_ib_post_send(ibqp, wr, bad_wr, false); in mlx5_ib_post_send_nodrain()
116 static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp, in mlx5_ib_post_send_drain() argument
120 return mlx5_ib_post_send(ibqp, wr, bad_wr, true); in mlx5_ib_post_send_drain()
123 static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp, in mlx5_ib_post_recv_nodrain() argument
127 return mlx5_ib_post_recv(ibqp, wr, bad_wr, false); in mlx5_ib_post_recv_nodrain()
130 static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp, in mlx5_ib_post_recv_drain() argument
134 return mlx5_ib_post_recv(ibqp, wr, bad_wr, true); in mlx5_ib_post_recv_drain()
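The wr.h hits above are the mlx5 drain/nodrain wrapper pattern: one worker takes the work-request list plus an explicit drain flag, and two trivial static inlines pin that flag to false or true. A minimal sketch of the same shape, using a hypothetical foo_ prefix rather than the real mlx5 symbols:

/* Sketch only: foo_ib_post_send() stands in for a driver's post-send
 * worker and is not an mlx5 symbol. The ordinary (nodrain) entry and the
 * drain helper differ only in the trailing flag. */
#include <rdma/ib_verbs.h>

int foo_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                     const struct ib_send_wr **bad_wr, bool drain);

static inline int foo_ib_post_send_nodrain(struct ib_qp *ibqp,
                                           const struct ib_send_wr *wr,
                                           const struct ib_send_wr **bad_wr)
{
        return foo_ib_post_send(ibqp, wr, bad_wr, false);
}

static inline int foo_ib_post_send_drain(struct ib_qp *ibqp,
                                         const struct ib_send_wr *wr,
                                         const struct ib_send_wr **bad_wr)
{
        return foo_ib_post_send(ibqp, wr, bad_wr, true);
}

The nodrain variants are what the driver exposes as its normal post verbs, while the drain variants are used when flushing a queue that is being torn down, typically so the flush posts can proceed even if the QP or device has entered an error state.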
/linux-6.6.21/drivers/infiniband/hw/hfi1/
qp.c
163 struct ib_qp *ibqp = &qp->ibqp; in hfi1_check_modify_qp() local
164 struct hfi1_ibdev *dev = to_idev(ibqp->device); in hfi1_check_modify_qp()
169 sc = ah_to_sc(ibqp->device, &attr->ah_attr); in hfi1_check_modify_qp()
182 sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr); in hfi1_check_modify_qp()
209 hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr); in qp_set_16b()
217 ibp = to_iport(qp->ibqp.device, qp->port_num); in qp_set_16b()
225 struct ib_qp *ibqp = &qp->ibqp; in hfi1_modify_qp() local
229 priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); in hfi1_modify_qp()
239 priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); in hfi1_modify_qp()
265 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in hfi1_setup_wqe()
[all …]
ud.c
33 struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); in ud_loopback()
47 qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp, in ud_loopback()
55 sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ? in ud_loopback()
56 IB_QPT_UD : sqp->ibqp.qp_type; in ud_loopback()
57 dqptype = qp->ibqp.qp_type == IB_QPT_GSI ? in ud_loopback()
58 IB_QPT_UD : qp->ibqp.qp_type; in ud_loopback()
69 if (qp->ibqp.qp_num > 1) { in ud_loopback()
82 sqp->ibqp.qp_num, qp->ibqp.qp_num, in ud_loopback()
93 if (qp->ibqp.qp_num) { in ud_loopback()
131 if (qp->ibqp.qp_num == 0) in ud_loopback()
[all …]
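Both hfi1 (above) and qib (further down in this listing) normalize the GSI special QP to UD before comparing the source and destination QP types on the loopback path, because GSI traffic rides on a UD-style queue pair. Reduced to the bare check, with placeholder names rather than the drivers' real helpers:

/* Sketch: treat IB_QPT_GSI as IB_QPT_UD when matching a loopback pair;
 * -EINVAL is only a stand-in for the drivers' actual drop behaviour. */
#include <rdma/ib_verbs.h>
#include <linux/errno.h>

static int foo_loopback_types_match(struct ib_qp *sqp, struct ib_qp *qp)
{
        enum ib_qp_type sqptype, dqptype;

        sqptype = sqp->qp_type == IB_QPT_GSI ? IB_QPT_UD : sqp->qp_type;
        dqptype = qp->qp_type == IB_QPT_GSI ? IB_QPT_UD : qp->qp_type;

        return sqptype == dqptype ? 0 : -EINVAL;
}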
trace_tid.h
198 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
207 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
208 __entry->qpn = qp->ibqp.qp_num;
258 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
265 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
266 __entry->qpn = qp->ibqp.qp_num;
305 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
317 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
318 __entry->qpn = qp->ibqp.qp_num;
360 __entry->qpn = qp ? qp->ibqp.qp_num : 0;
[all …]
trace_rc.h
21 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
32 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
33 __entry->qpn = qp->ibqp.qp_num;
83 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
92 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
93 __entry->qpn = qp->ibqp.qp_num;
/linux-6.6.21/drivers/infiniband/sw/rdmavt/
qp.c
426 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); in rvt_free_qp_cb()
429 rvt_reset_qp(rdi, qp, qp->ibqp.qp_type); in rvt_free_qp_cb()
582 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); in rvt_clear_mr_refs()
666 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); in rvt_qp_acks_has_lkey()
692 if (qp->ibqp.qp_type == IB_QPT_SMI || in rvt_qp_mr_clean()
693 qp->ibqp.qp_type == IB_QPT_GSI) in rvt_qp_mr_clean()
714 ev.device = qp->ibqp.device; in rvt_qp_mr_clean()
715 ev.element.qp = &qp->ibqp; in rvt_qp_mr_clean()
717 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); in rvt_qp_mr_clean()
732 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits); in rvt_remove_qp()
[all …]
qp.h
13 int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
15 int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
17 int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
18 int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
20 int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
22 int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
trace_qp.h
21 RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
26 RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
27 __entry->qpn = qp->ibqp.qp_num;
51 RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
58 RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
59 __entry->qpn = qp->ibqp.qp_num;
trace_tx.h
52 RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
72 RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
75 __entry->qpn = qp->ibqp.qp_num;
76 __entry->qpt = qp->ibqp.qp_type;
119 RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
131 RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
134 __entry->qpn = qp->ibqp.qp_num;
135 __entry->qpt = qp->ibqp.qp_type;
mcast.c
238 int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) in rvt_attach_mcast() argument
240 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); in rvt_attach_mcast()
241 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); in rvt_attach_mcast()
247 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) in rvt_attach_mcast()
301 int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) in rvt_detach_mcast() argument
303 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); in rvt_detach_mcast()
304 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); in rvt_detach_mcast()
312 if (ibqp->qp_num <= 1) in rvt_detach_mcast()
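The mcast.c guards above are worth spelling out: qp_num 0 and 1 are the SMI and GSI special QPs, which never join multicast groups, and rvt_attach_mcast() additionally refuses a QP still in RESET. Stripped of the rdmavt table handling, the entry check amounts to this sketch:

/* Sketch of the entry validation only; the real rvt functions go on to
 * take the multicast table lock and manage group reference counts. */
#include <rdma/ib_verbs.h>
#include <linux/errno.h>

static int foo_mcast_attach_checks(struct ib_qp *ibqp, enum ib_qp_state state)
{
        /* qp_num 0 and 1 are the SMI/GSI special QPs. */
        if (ibqp->qp_num <= 1 || state == IB_QPS_RESET)
                return -EINVAL;

        return 0;
}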
/linux-6.6.21/drivers/infiniband/hw/mana/
qp.c
95 static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd, in mana_ib_create_qp_rss() argument
99 struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp); in mana_ib_create_qp_rss()
256 static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd, in mana_ib_create_qp_raw() argument
261 struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp); in mana_ib_create_qp_raw()
407 int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr, in mana_ib_create_qp() argument
414 return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr, in mana_ib_create_qp()
417 return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata); in mana_ib_create_qp()
420 ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n", in mana_ib_create_qp()
427 int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, in mana_ib_modify_qp() argument
439 container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev); in mana_ib_destroy_qp_rss()
[all …]
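mana_ib_create_qp() is a type dispatcher: RSS and raw-packet QPs go to dedicated helpers, anything else is logged with ibdev_dbg() and rejected. A compressed sketch of that shape (foo_create_raw() is a placeholder, and the exact error code here is an assumption, not taken from the mana source):

/* Sketch: dispatch QP creation on the requested type and reject the rest. */
#include <rdma/ib_verbs.h>
#include <linux/errno.h>

static int foo_create_raw(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
                          struct ib_udata *udata);

static int foo_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
                         struct ib_udata *udata)
{
        switch (attr->qp_type) {
        case IB_QPT_RAW_PACKET:
                return foo_create_raw(ibqp, attr, udata);
        default:
                ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
                          attr->qp_type);
                return -EOPNOTSUPP;
        }
}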
/linux-6.6.21/drivers/infiniband/hw/qib/
qib_ud.c
53 struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); in qib_ud_loopback()
73 sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ? in qib_ud_loopback()
74 IB_QPT_UD : sqp->ibqp.qp_type; in qib_ud_loopback()
75 dqptype = qp->ibqp.qp_type == IB_QPT_GSI ? in qib_ud_loopback()
76 IB_QPT_UD : qp->ibqp.qp_type; in qib_ud_loopback()
87 if (qp->ibqp.qp_num > 1) { in qib_ud_loopback()
99 sqp->ibqp.qp_num, qp->ibqp.qp_num, in qib_ud_loopback()
111 if (qp->ibqp.qp_num) { in qib_ud_loopback()
149 if (qp->ibqp.qp_num == 0) in qib_ud_loopback()
204 wc.qp = &qp->ibqp; in qib_ud_loopback()
[all …]
qib_ruc.c
53 ev.device = qp->ibqp.device; in qib_migrate_qp()
54 ev.element.qp = &qp->ibqp; in qib_migrate_qp()
56 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); in qib_migrate_qp()
114 0, qp->ibqp.qp_num, in qib_ruc_check_hdr()
153 0, qp->ibqp.qp_num, in qib_ruc_check_hdr()
210 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in qib_make_ruc_header()
266 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); in qib_do_send()
271 if ((qp->ibqp.qp_type == IB_QPT_RC || in qib_do_send()
272 qp->ibqp.qp_type == IB_QPT_UC) && in qib_do_send()
279 if (qp->ibqp.qp_type == IB_QPT_RC) in qib_do_send()
[all …]
/linux-6.6.21/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c
61 *send_cq = to_vcq(qp->ibqp.send_cq); in get_cqs()
62 *recv_cq = to_vcq(qp->ibqp.recv_cq); in get_cqs()
191 int pvrdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, in pvrdma_create_qp() argument
194 struct pvrdma_qp *qp = to_vqp(ibqp); in pvrdma_create_qp()
195 struct pvrdma_dev *dev = to_vdev(ibqp->device); in pvrdma_create_qp()
234 init_attr->port_num > ibqp->device->phys_port_cnt) { in pvrdma_create_qp()
271 qp->rumem = ib_umem_get(ibqp->device, in pvrdma_create_qp()
284 qp->sumem = ib_umem_get(ibqp->device, ucmd.sbuf_addr, in pvrdma_create_qp()
302 ret = pvrdma_set_sq_size(to_vdev(ibqp->device), in pvrdma_create_qp()
307 ret = pvrdma_set_rq_size(to_vdev(ibqp->device), in pvrdma_create_qp()
[all …]
/linux-6.6.21/drivers/infiniband/hw/erdma/
erdma_verbs.h
218 struct ib_qp ibqp; member
271 #define QP_ID(qp) ((qp)->ibqp.qp_num)
307 return container_of(qp, struct erdma_qp, ibqp); in to_eqp()
335 int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
337 int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
339 int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
341 int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
350 void erdma_qp_get_ref(struct ib_qp *ibqp);
351 void erdma_qp_put_ref(struct ib_qp *ibqp);
353 int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
[all …]
/linux-6.6.21/drivers/infiniband/hw/mthca/
mthca_qp.c
261 event.element.qp = &qp->ibqp; in mthca_qp_event()
262 if (qp->ibqp.event_handler) in mthca_qp_event()
263 qp->ibqp.event_handler(&event, qp->ibqp.qp_context); in mthca_qp_event()
430 int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, in mthca_query_qp() argument
433 struct mthca_dev *dev = to_mdev(ibqp->device); in mthca_query_qp()
434 struct mthca_qp *qp = to_mqp(ibqp); in mthca_query_qp()
556 static int __mthca_modify_qp(struct ib_qp *ibqp, in __mthca_modify_qp() argument
562 struct mthca_dev *dev = to_mdev(ibqp->device); in __mthca_modify_qp()
563 struct mthca_qp *qp = to_mqp(ibqp); in __mthca_modify_qp()
626 if (qp->ibqp.uobject) in __mthca_modify_qp()
[all …]
mthca_mcg.c
120 int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) in mthca_multicast_attach() argument
122 struct mthca_dev *dev = to_mdev(ibqp->device); in mthca_multicast_attach()
165 if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) { in mthca_multicast_attach()
167 ibqp->qp_num); in mthca_multicast_attach()
171 mgm->qp[i] = cpu_to_be32(ibqp->qp_num | (1 << 31)); in mthca_multicast_attach()
214 int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) in mthca_multicast_detach() argument
216 struct mthca_dev *dev = to_mdev(ibqp->device); in mthca_multicast_detach()
242 if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) in mthca_multicast_detach()
249 mthca_err(dev, "QP %06x not found in MGM\n", ibqp->qp_num); in mthca_multicast_detach()
/linux-6.6.21/drivers/infiniband/hw/usnic/
usnic_ib_qp_grp.h
49 struct ib_qp ibqp; member
105 struct usnic_ib_qp_grp *to_uqp_grp(struct ib_qp *ibqp) in to_uqp_grp() argument
107 return container_of(ibqp, struct usnic_ib_qp_grp, ibqp); in to_uqp_grp()
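usnic, like erdma, rdmavt, mthca and the other drivers in this listing, embeds struct ib_qp inside its own QP object and recovers the enclosing object with container_of(). The pattern in isolation, with a hypothetical foo_qp rather than any real driver struct:

/* Sketch: the verbs core hands drivers a struct ib_qp *; container_of()
 * gets back to the driver-private wrapper via the embedded member. */
#include <rdma/ib_verbs.h>

struct foo_qp {
        struct ib_qp ibqp;   /* embedded core verbs QP */
        u32 flags;           /* driver-private state */
};

static inline struct foo_qp *to_fqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct foo_qp, ibqp);
}

This embedding is why nearly every hit in this search is either the embedded member declaration itself or a qp->ibqp.<field> access reaching back into the core object.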
/linux-6.6.21/drivers/infiniband/hw/mlx4/
qp.c
215 struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp; in mlx4_ib_handle_qp_event() local
218 event.device = ibqp->device; in mlx4_ib_handle_qp_event()
219 event.element.qp = ibqp; in mlx4_ib_handle_qp_event()
252 ibqp->event_handler(&event, ibqp->qp_context); in mlx4_ib_handle_qp_event()
261 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; in mlx4_ib_qp_event() local
267 if (!ibqp->event_handler) in mlx4_ib_qp_event()
774 qp->ibqp.qp_num = qp->mqp.qpn; in _mlx4_ib_create_qp_rss()
1344 if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) in get_pd()
1345 return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); in get_pd()
1347 return to_mpd(qp->ibqp.pd); in get_pd()
[all …]
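The mlx4 handler above shows the common shape of asynchronous QP event delivery: if the consumer registered an event_handler at QP creation, build a struct ib_event around &qp->ibqp and invoke the handler with the consumer's qp_context. A trimmed-down sketch (foo_qp_event() is illustrative, not the mlx4 code; real drivers first map a hardware event code to an ib_event_type):

/* Sketch: forward a hardware QP event to the ULP's handler, if any. */
#include <rdma/ib_verbs.h>

static void foo_qp_event(struct ib_qp *ibqp, enum ib_event_type type)
{
        struct ib_event event = { };

        if (!ibqp->event_handler)
                return;

        event.device     = ibqp->device;
        event.element.qp = ibqp;
        event.event      = type;

        ibqp->event_handler(&event, ibqp->qp_context);
}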
/linux-6.6.21/drivers/infiniband/hw/hns/
hns_roce_hw_v2.c
194 struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev; in fill_ext_sge_inl_data()
270 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device); in check_inl_data_len()
287 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device); in set_rc_inl()
328 static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, in set_rwqe_data_seg() argument
335 struct hns_roce_qp *qp = to_hr_qp(ibqp); in set_rwqe_data_seg()
491 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn); in set_ud_wqe()
573 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device); in set_rc_wqe()
601 ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe, in set_rc_wqe()
688 static int hns_roce_v2_post_send(struct ib_qp *ibqp, in hns_roce_v2_post_send() argument
692 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_v2_post_send()
[all …]
hns_roce_qp.c
57 ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL); in flush_work_handle()
131 struct ib_qp *ibqp = &hr_qp->ibqp; in hns_roce_ib_qp_event() local
134 if (ibqp->event_handler) { in hns_roce_ib_qp_event()
135 event.device = ibqp->device; in hns_roce_ib_qp_event()
136 event.element.qp = ibqp; in hns_roce_ib_qp_event()
165 dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n", in hns_roce_ib_qp_event()
169 ibqp->event_handler(&event, ibqp->qp_context); in hns_roce_ib_qp_event()
235 if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { in alloc_qpn()
374 if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT) in hns_roce_qp_remove()
377 if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI && in hns_roce_qp_remove()
[all …]
/linux-6.6.21/drivers/infiniband/sw/rxe/
rxe_loc.h
38 int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
39 int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
118 return qp->ibqp.qp_num; in qp_num()
123 return qp->ibqp.qp_type; in qp_type()
133 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) in qp_mtu()
180 return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type]; in wr_opcode_mask()
rxe_comp.c
230 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in check_ack()
413 wc->qp = &qp->ibqp; in make_send_cqe()
417 uwc->qp_num = qp->ibqp.qp_num; in make_send_cqe()
451 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in do_complete()
493 if (qp->ibqp.event_handler) { in comp_check_sq_drain_done()
496 ev.device = qp->ibqp.device; in comp_check_sq_drain_done()
497 ev.element.qp = &qp->ibqp; in comp_check_sq_drain_done()
499 qp->ibqp.event_handler(&ev, in comp_check_sq_drain_done()
500 qp->ibqp.qp_context); in comp_check_sq_drain_done()
561 ib_device_put(qp->ibqp.device); in drain_resp_pkts()
[all …]
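rxe's make_send_cqe() (partially shown above) highlights the split between kernel and user completions: a kernel consumer receives a struct ib_wc that carries the ib_qp pointer itself, while a user CQ is filled with a struct ib_uverbs_wc, which can only carry the QP number. Roughly (the is_user flag is illustrative; the real code keys off the CQ):

/* Sketch: record the QP identity in a completion entry. */
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

static void foo_fill_cqe_qp(struct ib_qp *ibqp, bool is_user,
                            struct ib_wc *wc, struct ib_uverbs_wc *uwc)
{
        if (is_user)
                uwc->qp_num = ibqp->qp_num;  /* userspace sees only the QPN */
        else
                wc->qp = ibqp;               /* kernel ULPs get the pointer */
}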
/linux-6.6.21/drivers/infiniband/hw/cxgb4/
ev.c
109 event.element.qp = &qhp->ibqp; in post_qp_event()
110 if (qhp->ibqp.event_handler) in post_qp_event()
111 (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); in post_qp_event()
153 c4iw_qp_add_ref(&qhp->ibqp); in c4iw_ev_dispatch()
217 c4iw_qp_rem_ref(&qhp->ibqp); in c4iw_ev_dispatch()
/linux-6.6.21/include/rdma/
rdmavt_qp.h
333 struct ib_qp ibqp; member
464 static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp) in ibqp_to_rvtqp() argument
466 return container_of(ibqp, struct rvt_qp, ibqp); in ibqp_to_rvtqp()
711 if (qp->ibqp.qp_num == qpn) in rvt_lookup_qpn()
725 struct ib_qp *ibqp = &qp->ibqp; in rvt_mod_retry_timer_ext() local
726 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); in rvt_mod_retry_timer_ext()
785 struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq); in rvt_recv_cq()
805 struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq); in rvt_send_cq()
859 .qp = &qp->ibqp, in rvt_qp_complete_swqe()
968 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); in rvt_to_iport()
