
Searched refs:ibcq (Results 1 – 25 of 56) sorted by relevance


/linux-5.19.10/drivers/infiniband/sw/rdmavt/
cq.c
54 if (head >= (unsigned)cq->ibcq.cqe) { in rvt_cq_enter()
55 head = cq->ibcq.cqe; in rvt_cq_enter()
68 if (cq->ibcq.event_handler) { in rvt_cq_enter()
71 ev.device = cq->ibcq.device; in rvt_cq_enter()
72 ev.element.cq = &cq->ibcq; in rvt_cq_enter()
74 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rvt_cq_enter()
140 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in send_complete()
158 int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, in rvt_create_cq() argument
161 struct ib_device *ibdev = ibcq->device; in rvt_create_cq()
163 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_create_cq()
[all …]
cq.h
12 int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
14 int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
15 int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
16 int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
17 int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
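
The ibcq_to_rvtcq() conversion above is the pattern every driver in these results repeats: the provider embeds a struct ib_cq inside its private CQ type and recovers the outer structure with container_of(). A minimal userspace sketch of that downcast, using pared-down stand-in types (the real ib_cq lives in include/rdma/ib_verbs.h; driver_private is invented for illustration):

#include <stddef.h>
#include <stdio.h>

/* Stand-ins only; the kernel definitions are far larger. */
struct ib_cq { int cqe; };

struct rvt_cq {
    struct ib_cq ibcq;   /* embedded core object -- the 'member' hits */
    int driver_private;  /* illustrative driver state */
};

/* Userspace equivalent of the kernel's container_of() */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq)
{
    return container_of(ibcq, struct rvt_cq, ibcq);
}

int main(void)
{
    struct rvt_cq cq = { .ibcq = { .cqe = 255 }, .driver_private = 7 };
    struct ib_cq *handle = &cq.ibcq;  /* what the core hands around */

    printf("%d\n", ibcq_to_rvtcq(handle)->driver_private);  /* prints 7 */
    return 0;
}

This is why the search shows so many one-line to_mcq()/to_vcq()-style helpers below: the downcast is pure pointer arithmetic, with no lookup table from core handles to driver state.
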
/linux-5.19.10/drivers/infiniband/hw/mlx4/
cq.c
45 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_comp() local
46 ibcq->comp_handler(ibcq, ibcq->cq_context); in mlx4_ib_cq_comp()
52 struct ib_cq *ibcq; in mlx4_ib_cq_event() local
60 ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_event()
61 if (ibcq->event_handler) { in mlx4_ib_cq_event()
62 event.device = ibcq->device; in mlx4_ib_cq_event()
64 event.element.cq = ibcq; in mlx4_ib_cq_event()
65 ibcq->event_handler(&event, ibcq->cq_context); in mlx4_ib_cq_event()
81 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
85 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
[all …]
mlx4_ib.h
116 struct ib_cq ibcq; member
688 static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq) in to_mcq() argument
690 return container_of(ibcq, struct mlx4_ib_cq, ibcq); in to_mcq()
766 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
767 int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
770 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
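
get_sw_cqe() above indexes the ring with n & cq->ibcq.cqe and compares the CQE owner bit against the wrap parity !!(n & (cq->ibcq.cqe + 1)). A toy sketch of that test, assuming ibcq.cqe holds a power-of-two-minus-one mask (the bool owner field is a simplification; the real mlx4 CQE packs this bit into owner_sr_opcode, and the '? NULL : cqe' expression above is the same comparison):

#include <stdbool.h>
#include <stdio.h>

struct toy_cqe { bool owner; };

/* Bit (mask + 1) of the consumer index flips once per full wrap, so an
 * entry counts as software-owned only when its owner bit matches that
 * parity -- stale entries from the previous lap fail the test. */
static bool cqe_is_sw(const struct toy_cqe *e, unsigned int n, unsigned int mask)
{
    return e->owner == !!(n & (mask + 1));
}

int main(void)
{
    /* 4-entry toy ring; owner bits chosen to exercise both outcomes. */
    struct toy_cqe ring[4] = { { true }, { true }, { false }, { false } };
    unsigned int mask = 3;  /* the value ibcq.cqe would hold for 4 entries */

    for (unsigned int n = 0; n < 8; n++)
        printf("n=%u slot=%u sw-owned=%d\n", n, n & mask,
               cqe_is_sw(&ring[n & mask], n, mask));
    return 0;
}
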
/linux-5.19.10/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c
63 int pvrdma_req_notify_cq(struct ib_cq *ibcq, in pvrdma_req_notify_cq() argument
66 struct pvrdma_dev *dev = to_vdev(ibcq->device); in pvrdma_req_notify_cq()
67 struct pvrdma_cq *cq = to_vcq(ibcq); in pvrdma_req_notify_cq()
83 cq->ibcq.cqe, &head); in pvrdma_req_notify_cq()
101 int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, in pvrdma_create_cq() argument
104 struct ib_device *ibdev = ibcq->device; in pvrdma_create_cq()
107 struct pvrdma_cq *cq = to_vcq(ibcq); in pvrdma_create_cq()
132 cq->ibcq.cqe = entries; in pvrdma_create_cq()
195 cq->ibcq.cqe = resp->cqe; in pvrdma_create_cq()
209 pvrdma_destroy_cq(&cq->ibcq, udata); in pvrdma_create_cq()
[all …]
pvrdma_main.c
186 INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq),
321 if (cq && cq->ibcq.event_handler) { in pvrdma_cq_event()
322 struct ib_cq *ibcq = &cq->ibcq; in pvrdma_cq_event() local
325 e.device = ibcq->device; in pvrdma_cq_event()
326 e.element.cq = ibcq; in pvrdma_cq_event()
328 ibcq->event_handler(&e, ibcq->cq_context); in pvrdma_cq_event()
492 if (cq && cq->ibcq.comp_handler) in pvrdma_intrx_handler()
493 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in pvrdma_intrx_handler()
pvrdma.h
87 struct ib_cq ibcq; member
274 static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq) in to_vcq() argument
276 return container_of(ibcq, struct pvrdma_cq, ibcq); in to_vcq()
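
The INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq) hit in pvrdma_main.c is how a provider tells the RDMA core how large its private CQ really is, so the core can allocate the whole pvrdma_cq and pass the embedded ib_cq into the create verb. A conceptual userspace model of that allocation (struct obj_size, OBJ_SIZE, and core_alloc_cq are invented for illustration and are not the macro's real expansion):

#include <stddef.h>
#include <stdlib.h>

struct ib_cq { int cqe; };
struct pvrdma_cq { struct ib_cq ibcq; int ring_state; };  /* trimmed */

/* What the macro effectively records: total size plus member offset. */
struct obj_size { size_t size; size_t offset; };

#define OBJ_SIZE(drv, member) \
    ((struct obj_size){ sizeof(struct drv), offsetof(struct drv, member) })

/* Core-side allocation: one driver-sized object; the returned handle
 * points at the embedded member inside it. */
static struct ib_cq *core_alloc_cq(struct obj_size sz)
{
    char *drv = calloc(1, sz.size);

    return drv ? (struct ib_cq *)(drv + sz.offset) : NULL;
}

int main(void)
{
    struct obj_size sz = OBJ_SIZE(pvrdma_cq, ibcq);
    struct ib_cq *ibcq = core_alloc_cq(sz);

    if (!ibcq)
        return 1;
    /* pvrdma_create_cq() would now run to_vcq(ibcq) on this handle. */
    free((char *)ibcq - sz.offset);
    return 0;
}
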
/linux-5.19.10/drivers/infiniband/sw/rxe/
rxe_cq.c
54 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in rxe_send_complete()
87 cq->ibcq.cqe = cqe; in rxe_cq_from_init()
101 cq->ibcq.cqe = cqe; in rxe_cq_resize_queue()
118 if (cq->ibcq.event_handler) { in rxe_cq_post()
119 ev.device = cq->ibcq.device; in rxe_cq_post()
120 ev.element.cq = &cq->ibcq; in rxe_cq_post()
122 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rxe_cq_post()
rxe_verbs.c
764 static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, in rxe_create_cq() argument
768 struct ib_device *dev = ibcq->device; in rxe_create_cq()
770 struct rxe_cq *cq = to_rcq(ibcq); in rxe_create_cq()
794 static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) in rxe_destroy_cq() argument
796 struct rxe_cq *cq = to_rcq(ibcq); in rxe_destroy_cq()
810 static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) in rxe_resize_cq() argument
813 struct rxe_cq *cq = to_rcq(ibcq); in rxe_resize_cq()
814 struct rxe_dev *rxe = to_rdev(ibcq->device); in rxe_resize_cq()
837 static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) in rxe_poll_cq() argument
840 struct rxe_cq *cq = to_rcq(ibcq); in rxe_poll_cq()
[all …]
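
The four rxe entry points above are the standard CQ verbs a provider wires into its struct ib_device_ops table and registers with ib_set_device_ops(). An abbreviated, kernel-style fragment of how that hookup plausibly looks in rxe_verbs.c (most ops and the surrounding registration code are omitted):

static const struct ib_device_ops rxe_dev_ops = {
    /* CQ verbs from the hits above; many other ops omitted */
    .create_cq = rxe_create_cq,
    .destroy_cq = rxe_destroy_cq,
    .poll_cq = rxe_poll_cq,
    .resize_cq = rxe_resize_cq,

    /* lets the core allocate rxe_cq and pass &rxe_cq->ibcq to create_cq */
    INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
};

/* during device registration */
ib_set_device_ops(&rxe->ib_dev, &rxe_dev_ops);
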
/linux-5.19.10/drivers/infiniband/hw/mthca/
mthca_cq.c
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw()
230 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in mthca_cq_completion()
254 event.element.cq = &cq->ibcq; in mthca_cq_event()
255 if (cq->ibcq.event_handler) in mthca_cq_event()
256 cq->ibcq.event_handler(&event, cq->ibcq.cq_context); in mthca_cq_event()
290 cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe)); in mthca_cq_clean()
292 if (prod_index == cq->cons_index + cq->ibcq.cqe) in mthca_cq_clean()
304 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in mthca_cq_clean()
310 memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe), in mthca_cq_clean()
316 set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe)); in mthca_cq_clean()
[all …]
mthca_provider.h
184 struct ib_cq ibcq; member
301 static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) in to_mcq() argument
303 return container_of(ibcq, struct mthca_cq, ibcq); in to_mcq()
mthca_provider.c
575 static int mthca_create_cq(struct ib_cq *ibcq, in mthca_create_cq() argument
579 struct ib_device *ibdev = ibcq->device; in mthca_create_cq()
611 cq = to_mcq(ibcq); in mthca_create_cq()
696 static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) in mthca_resize_cq() argument
698 struct mthca_dev *dev = to_mdev(ibcq->device); in mthca_resize_cq()
699 struct mthca_cq *cq = to_mcq(ibcq); in mthca_resize_cq()
710 if (entries == ibcq->cqe + 1) { in mthca_resize_cq()
750 tcqe = cq->ibcq.cqe; in mthca_resize_cq()
752 cq->ibcq.cqe = cq->resize_buf->cqe; in mthca_resize_cq()
764 ibcq->cqe = entries - 1; in mthca_resize_cq()
[all …]
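
mthca_resize_cq() above encodes the sizing convention running through these results: ibcq->cqe holds one less than the allocated power-of-two entry count, so it doubles as the ring-index mask, and a resize to the current size ('entries == ibcq->cqe + 1') is a no-op. A small standalone sketch of that rule (roundup_pow_of_two here re-implements the kernel helper of the same name):

#include <stdio.h>

/* Userspace re-implementation of the kernel's roundup_pow_of_two(). */
static unsigned int roundup_pow_of_two(unsigned int n)
{
    unsigned int p = 1;

    while (p < n)
        p <<= 1;
    return p;
}

int main(void)
{
    unsigned int requested = 100;
    unsigned int allocated = roundup_pow_of_two(requested);
    unsigned int ibcq_cqe = allocated - 1;  /* stored the mthca way */

    printf("requested=%u allocated=%u ibcq.cqe=%u\n",
           requested, allocated, ibcq_cqe);

    /* Mirrors the 'entries == ibcq->cqe + 1' early-out above. */
    printf("resize to %u is a no-op: %d\n",
           allocated, allocated == ibcq_cqe + 1);
    return 0;
}
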
/linux-5.19.10/drivers/infiniband/hw/mlx5/
cq.c
43 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx5_ib_cq_comp() local
45 ibcq->comp_handler(ibcq, ibcq->cq_context); in mlx5_ib_cq_comp()
51 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_cq_event()
52 struct ib_cq *ibcq = &cq->ibcq; in mlx5_ib_cq_event() local
61 if (ibcq->event_handler) { in mlx5_ib_cq_event()
64 event.element.cq = ibcq; in mlx5_ib_cq_event()
65 ibcq->event_handler(&event, ibcq->cq_context); in mlx5_ib_cq_event()
81 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
87 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
448 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_poll_one()
[all …]
restrack.c
151 static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq) in fill_res_cq_entry_raw() argument
153 struct mlx5_ib_dev *dev = to_mdev(ibcq->device); in fill_res_cq_entry_raw()
154 struct mlx5_ib_cq *cq = to_mcq(ibcq); in fill_res_cq_entry_raw()
mlx5_ib.h
538 struct ib_cq ibcq; member
1148 static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq) in to_mcq() argument
1150 return container_of(ibcq, struct mlx5_ib_cq, ibcq); in to_mcq()
1254 int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1257 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
1258 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
1260 int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
1322 int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
1434 int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
/linux-5.19.10/include/rdma/
rdmavt_cq.h
47 struct ib_cq ibcq; member
60 static inline struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq) in ibcq_to_rvtcq() argument
62 return container_of(ibcq, struct rvt_cq, ibcq); in ibcq_to_rvtcq()
/linux-5.19.10/drivers/infiniband/hw/cxgb4/
ev.c
105 event.device = chp->ibcq.device; in post_qp_event()
107 event.element.cq = &chp->ibcq; in post_qp_event()
115 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); in post_qp_event()
234 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); in c4iw_ev_handler()
iw_cxgb4.h
423 struct ib_cq ibcq; member
434 static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq) in to_c4iw_cq() argument
436 return container_of(ibcq, struct c4iw_cq, ibcq); in to_c4iw_cq()
958 int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
982 int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
984 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
1042 int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq);
/linux-5.19.10/drivers/infiniband/hw/hns/
hns_roce_cq.c
452 struct ib_cq *ibcq; in hns_roce_cq_completion() local
463 ibcq = &hr_cq->ib_cq; in hns_roce_cq_completion()
464 if (ibcq->comp_handler) in hns_roce_cq_completion()
465 ibcq->comp_handler(ibcq, ibcq->cq_context); in hns_roce_cq_completion()
473 struct ib_cq *ibcq; in hns_roce_cq_event() local
492 ibcq = &hr_cq->ib_cq; in hns_roce_cq_event()
493 if (ibcq->event_handler) { in hns_roce_cq_event()
494 event.device = ibcq->device; in hns_roce_cq_event()
495 event.element.cq = ibcq; in hns_roce_cq_event()
497 ibcq->event_handler(&event, ibcq->cq_context); in hns_roce_cq_event()
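
hns_roce_cq_completion() and hns_roce_cq_event() show the two callback paths every driver in these results duplicates: comp_handler for "completions arrived, poll me" and event_handler for asynchronous CQ errors, each guarded by a NULL check and invoked with the consumer's cq_context. A pared-down sketch of that dispatch (struct ib_event here is reduced to the fields these hits touch; the real kernel type carries the CQ in an element union):

#include <stdio.h>

struct ib_cq;

struct ib_event {
    void *device;
    struct ib_cq *cq;   /* stands in for the kernel's event.element.cq */
    int event;
};

struct ib_cq {
    void (*comp_handler)(struct ib_cq *cq, void *cq_context);
    void (*event_handler)(struct ib_event *ev, void *cq_context);
    void *cq_context;
    void *device;
};

/* Driver-side dispatch, mirroring the guarded calls in the hits above. */
static void cq_completion(struct ib_cq *ibcq)
{
    if (ibcq->comp_handler)
        ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void cq_event(struct ib_cq *ibcq, int type)
{
    if (ibcq->event_handler) {
        struct ib_event ev = {
            .device = ibcq->device,
            .cq = ibcq,
            .event = type,
        };

        ibcq->event_handler(&ev, ibcq->cq_context);
    }
}

/* Consumer callbacks, as a verbs user would register them at CQ creation. */
static void on_comp(struct ib_cq *cq, void *ctx)
{
    (void)cq;
    printf("completion on %s\n", (char *)ctx);
}

static void on_event(struct ib_event *ev, void *ctx)
{
    printf("event %d on %s\n", ev->event, (char *)ctx);
}

int main(void)
{
    struct ib_cq cq = { on_comp, on_event, "cq0", NULL };

    cq_completion(&cq);  /* what an IRQ handler would trigger */
    cq_event(&cq, 1);    /* an async CQ-error style event */
    return 0;
}
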
/linux-5.19.10/drivers/infiniband/hw/qedr/
qedr_roce_cm.c
82 cq->ibcq.comp_handler ? "Yes" : "No"); in qedr_ll2_complete_tx_packet()
92 if (cq->ibcq.comp_handler) in qedr_ll2_complete_tx_packet()
93 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in qedr_ll2_complete_tx_packet()
121 if (cq->ibcq.comp_handler) in qedr_ll2_complete_rx_packet()
122 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in qedr_ll2_complete_rx_packet()
671 int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) in qedr_gsi_poll_cq() argument
673 struct qedr_dev *dev = get_qedr_dev(ibcq->device); in qedr_gsi_poll_cq()
674 struct qedr_cq *cq = get_qedr_cq(ibcq); in qedr_gsi_poll_cq()
verbs.h
54 int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
56 int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
57 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
main.c
233 INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
468 if (!cq->destroyed && cq->ibcq.comp_handler) in qedr_irq_handler()
469 (*cq->ibcq.comp_handler) in qedr_irq_handler()
470 (&cq->ibcq, cq->ibcq.cq_context); in qedr_irq_handler()
649 struct ib_cq *ibcq; in qedr_affiliated_event() local
720 ibcq = &cq->ibcq; in qedr_affiliated_event()
721 if (ibcq->event_handler) { in qedr_affiliated_event()
722 event.device = ibcq->device; in qedr_affiliated_event()
723 event.element.cq = ibcq; in qedr_affiliated_event()
724 ibcq->event_handler(&event, ibcq->cq_context); in qedr_affiliated_event()
/linux-5.19.10/drivers/infiniband/hw/efa/
Defa.h89 struct ib_cq ibcq; member
151 int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
152 int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
/linux-5.19.10/drivers/infiniband/hw/ocrdma/
Docrdma_verbs.h72 int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
75 int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
ocrdma.h
316 struct ib_cq ibcq; member
471 static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq) in get_ocrdma_cq() argument
473 return container_of(ibcq, struct ocrdma_cq, ibcq); in get_ocrdma_cq()
