/linux-6.1.9/drivers/infiniband/sw/rxe/ |
D | rxe_req.c |
    41    unsigned int wqe_index;  in req_retry() (local)
    52    qp->req.wqe_index = cons;  in req_retry()
    56    for (wqe_index = cons; wqe_index != prod;  in req_retry()
    57         wqe_index = queue_next_index(q, wqe_index)) {  in req_retry()
    58    wqe = queue_addr_from_index(qp->sq.queue, wqe_index);  in req_retry()
    115   unsigned int index = qp->req.wqe_index;  in req_next_wqe()
    186   return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,  in rxe_wqe_is_fenced()
    554   qp->req.wqe_index = queue_next_index(qp->sq.queue,  in update_state()
    555       qp->req.wqe_index);  in update_state()
    605   qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);  in rxe_do_local_ops()
    [all …]
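The rxe hits above trace the requester's retry path: req.wqe_index is rewound to the consumer index, then advanced slot by slot until it meets the producer. A minimal userspace sketch of that walk, assuming a power-of-two queue depth and a local queue_next_index() defined here as a stand-in for the kernel helper:

#include <stdio.h>

#define QUEUE_DEPTH 8   /* assumed power of two */

static unsigned int queue_next_index(unsigned int index)
{
        return (index + 1) & (QUEUE_DEPTH - 1);
}

int main(void)
{
        unsigned int cons = 5, prod = 2;  /* ring wraps past the end */
        unsigned int wqe_index;

        /* Walk cons -> prod, as req_retry() does over the send queue. */
        for (wqe_index = cons; wqe_index != prod;
             wqe_index = queue_next_index(wqe_index))
                printf("revisit WQE at slot %u\n", wqe_index);

        return 0;
}

With cons = 5 and prod = 2 the loop visits slots 5, 6, 7, 0, 1, so wraparound is handled by the index helper rather than the loop condition.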
|
D | rxe_verbs.h |
    118   int wqe_index;  (member)
|
D | rxe_qp.c |
    235   qp->req.wqe_index = queue_get_producer(qp->sq.queue,  in rxe_qp_init_req()
|
/linux-6.1.9/drivers/infiniband/hw/mthca/ |
D | mthca_cq.c |
    373   struct mthca_qp *qp, int wqe_index, int is_send,  in handle_error_cqe() (argument)
    461   mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);  in handle_error_cqe()
    486   int wqe_index;  in mthca_poll_one() (local)
    535   wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)  in mthca_poll_one()
    537   entry->wr_id = (*cur_qp)->wrid[wqe_index +  in mthca_poll_one()
    543   wqe_index = wqe >> srq->wqe_shift;  in mthca_poll_one()
    544   entry->wr_id = srq->wrid[wqe_index];  in mthca_poll_one()
    550   wqe_index = wqe >> wq->wqe_shift;  in mthca_poll_one()
    556   if (unlikely(wqe_index < 0))  in mthca_poll_one()
    557   wqe_index = wq->max - 1;  in mthca_poll_one()
    [all …]
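mthca recovers wqe_index arithmetically: the CQE reports a byte address, and the index falls out after subtracting the queue's base offset and shifting right by log2 of the WQE stride, with a clamp to wq->max - 1 when the result underflows (lines 556-557 above). A sketch of that arithmetic with invented values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t send_wqe_offset = 0x1000;    /* assumed SQ base within the buffer */
        unsigned int wqe_shift = 6;           /* log2 of a 64-byte WQE stride */
        uint32_t wqe_addr = 0x1000 + 3 * 64;  /* as reported by the CQE */

        int wqe_index = (int)((wqe_addr - send_wqe_offset) >> wqe_shift);

        /* mthca additionally clamps an underflowed receive-side index: */
        int wq_max = 256;
        if (wqe_index < 0)
                wqe_index = wq_max - 1;

        printf("wqe_index = %d\n", wqe_index);  /* 3 */
        return 0;
}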
|
/linux-6.1.9/include/linux/mlx4/ |
D | cq.h |
    57    __be16 wqe_index;  (member)
    66    __be16 wqe_index;  (member)
    82    __be16 wqe_index;  (member)
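All three mlx4 CQE layouts store wqe_index as __be16, big-endian regardless of host byte order, which is why every consumer later in this listing goes through be16_to_cpu(). A userspace analogue using the glibc endian.h helpers:

#include <endian.h>   /* glibc: htobe16()/be16toh() */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t wire = htobe16(0x002a);    /* as the HCA would write it */
        uint16_t wqe_index = be16toh(wire); /* cf. be16_to_cpu() in the kernel */

        printf("wqe_index = %u\n", wqe_index);  /* 42 */
        return 0;
}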
|
/linux-6.1.9/drivers/net/ethernet/ibm/ehea/ |
D | ehea_qmr.h |
    308   int *wqe_index)  in ehea_get_swqe() (argument)
    313   *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);  in ehea_get_swqe()
    325   static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)  in ehea_poll_rq1() (argument)
    329   *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);  in ehea_poll_rq1()
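ehea's WQEs are 2^(7+sg) bytes (128 << sg), so converting the queue's running byte offset into a WQE index is a single right shift by (7 + sg). A sketch, with EHEA_SG_SQ assumed to be 2, i.e. 512-byte send WQEs:

#include <stdio.h>
#include <stdint.h>

#define EHEA_SG_SQ 2   /* assumed value for illustration */

int main(void)
{
        uint64_t current_q_offset = 3 * (128 << EHEA_SG_SQ);  /* 3 WQEs in */

        int wqe_index = (int)(current_q_offset >> (7 + EHEA_SG_SQ));
        printf("wqe_index = %d\n", wqe_index);  /* 3 */
        return 0;
}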
|
D | ehea_main.c |
    592   int arr_len, int wqe_index)  in get_skb_by_index_ll() (argument)
    598   x = wqe_index + 1;  in get_skb_by_index_ll()
    611   skb = skb_array[wqe_index];  in get_skb_by_index_ll()
    612   skb_array[wqe_index] = NULL;  in get_skb_by_index_ll()
    668   int wqe_index, last_wqe_index, rq, port_reset;  in ehea_proc_rwqes() (local)
    673   cqe = ehea_poll_rq1(qp, &wqe_index);  in ehea_proc_rwqes()
    681   last_wqe_index = wqe_index;  in ehea_proc_rwqes()
    688       wqe_index);  in ehea_proc_rwqes()
    740   cqe = ehea_poll_rq1(qp, &wqe_index);  in ehea_proc_rwqes()
    883   int wqe_index;  in ehea_poll() (local)
    [all …]
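get_skb_by_index_ll() uses the completion's wqe_index to pull the matching sk_buff out of a parallel array, then NULLs the slot so the same buffer cannot be reaped twice. A reduced sketch of that take-and-clear pattern, with plain pointers standing in for struct sk_buff:

#include <stdio.h>
#include <stddef.h>

#define ARR_LEN 16

static void *take_by_index(void **skb_array, int wqe_index)
{
        void *skb = skb_array[wqe_index];

        skb_array[wqe_index] = NULL;  /* slot is now free for reposting */
        return skb;
}

int main(void)
{
        static int pkt = 123;
        void *skb_array[ARR_LEN] = { NULL };

        skb_array[5] = &pkt;
        void *skb = take_by_index(skb_array, 5);
        printf("slot 5 -> %p, slot now %p\n", skb, skb_array[5]);
        return 0;
}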
|
/linux-6.1.9/drivers/infiniband/hw/mlx5/ |
D | odp.c |
    64     u16 wqe_index;  (member)
    1059   u16 wqe_index = pfault->wqe.wqe_index;  in mlx5_ib_mr_initiator_pfault_handler() (local)
    1073   wqe_index, qpn);  in mlx5_ib_mr_initiator_pfault_handler()
    1197   u16 wqe_index = pfault->wqe.wqe_index;  in mlx5_ib_mr_wqe_pfault_handler() (local)
    1228   ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,  in mlx5_ib_mr_wqe_pfault_handler()
    1235   ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,  in mlx5_ib_mr_wqe_pfault_handler()
    1244   ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,  in mlx5_ib_mr_wqe_pfault_handler()
    1272   ret, wqe_index, pfault->token);  in mlx5_ib_mr_wqe_pfault_handler()
    1444   pfault->wqe.wqe_index =  in mlx5_ib_eq_pf_process()
    1445       be16_to_cpu(pf_eqe->wqe.wqe_index);  in mlx5_ib_eq_pf_process()
    [all …]
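In the ODP path the page-fault event delivers wqe_index big-endian; the handler byte-swaps it, then reads the faulting WQE out of the SQ, RQ, or SRQ by that index into a PAGE_SIZE buffer. A heavily reduced sketch of that shape; the enum and read_wqe() helper here are hypothetical stand-ins, not the driver's API:

#include <endian.h>
#include <stdio.h>
#include <stdint.h>

enum wq_type { WQ_SQ, WQ_RQ, WQ_SRQ };

static int read_wqe(enum wq_type type, uint16_t wqe_index,
                    void *buf, size_t buflen)
{
        (void)buf;  /* a real handler copies WQE bytes from the queue here */
        printf("reading %s WQE %u into %zu-byte buffer\n",
               type == WQ_SQ ? "SQ" : type == WQ_RQ ? "RQ" : "SRQ",
               wqe_index, buflen);
        return 0;
}

int main(void)
{
        uint16_t wire = htobe16(7);          /* as found in the fault event */
        uint16_t wqe_index = be16toh(wire);  /* cf. be16_to_cpu() at line 1445 */
        char wqe[4096];                      /* cf. the PAGE_SIZE buffer */

        return read_wqe(WQ_SQ, wqe_index, wqe, sizeof(wqe));
}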
|
D | srq.c |
    388   void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)  in mlx5_ib_free_srq_wqe() (argument)
    396   next->next_wqe_index = cpu_to_be16(wqe_index);  in mlx5_ib_free_srq_wqe()
    397   srq->tail = wqe_index;  in mlx5_ib_free_srq_wqe()
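mlx5_ib_free_srq_wqe() (and its near-identical mlx4 twin further down) threads freed SRQ WQEs into a singly linked free list: the WQE at the old tail records the freed index as its successor, and the freed index becomes the new tail. An array-backed userspace sketch of the same bookkeeping, with the layout invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define SRQ_WQES 8

struct srq {
        uint16_t next_free[SRQ_WQES];  /* cf. next->next_wqe_index */
        int tail;                      /* index of the last free WQE */
};

static void free_srq_wqe(struct srq *srq, int wqe_index)
{
        srq->next_free[srq->tail] = (uint16_t)wqe_index; /* link after old tail */
        srq->tail = wqe_index;                           /* freed WQE is new tail */
}

int main(void)
{
        struct srq srq = { .tail = 0 };

        free_srq_wqe(&srq, 3);
        free_srq_wqe(&srq, 5);
        printf("free list: 0 -> %u -> %u\n",
               srq.next_free[0], srq.next_free[3]);  /* 0 -> 3 -> 5 */
        return 0;
}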
|
D | qp.c |
    108   size_t buflen, int wqe_index,  in mlx5_ib_read_user_wqe_common() (argument)
    113   size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);  in mlx5_ib_read_user_wqe_common()
    134   static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,  in mlx5_ib_read_kernel_wqe_sq() (argument)
    143   wqe_index = wqe_index & qp->sq.fbc.sz_m1;  in mlx5_ib_read_kernel_wqe_sq()
    146   p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);  in mlx5_ib_read_kernel_wqe_sq()
    162   wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;  in mlx5_ib_read_kernel_wqe_sq()
    163   p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);  in mlx5_ib_read_kernel_wqe_sq()
    169   static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,  in mlx5_ib_read_user_wqe_sq() (argument)
    183   ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,  in mlx5_ib_read_user_wqe_sq()
    220   int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,  in mlx5_ib_read_wqe_sq() (argument)
    [all …]
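Line 113 is the core addressing rule for reading a WQE back out of a queue buffer: the WQ's base offset, plus the index (wrapped modulo queue depth) shifted left by log2 of the stride. The same wrap appears as "& qp->sq.fbc.sz_m1" in the kernel-WQE variant. A sketch with invented numbers:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
        size_t wq_offset = 0x2000;   /* where this WQ starts in the buffer */
        int wq_wqe_cnt = 256;        /* queue depth */
        int wq_wqe_shift = 6;        /* 64-byte WQE stride */
        int wqe_index = 260;         /* past the end: wraps to slot 4 */

        size_t offset = wq_offset +
                        ((size_t)(wqe_index % wq_wqe_cnt) << wq_wqe_shift);
        printf("WQE %d lives at buffer offset 0x%zx\n", wqe_index, offset);
        return 0;  /* prints 0x2100: 0x2000 + 4 * 64 */
}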
|
D | mlx5_ib.h |
    1212   void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
    1239   int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
    1241   int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
    1243   int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
|
/linux-6.1.9/drivers/infiniband/hw/mlx4/ |
D | srq.c |
    293   void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)  in mlx4_ib_free_srq_wqe() (argument)
    301   next->next_wqe_index = cpu_to_be16(wqe_index);  in mlx4_ib_free_srq_wqe()
    302   srq->tail = wqe_index;  in mlx4_ib_free_srq_wqe()
|
D | cq.c |
    517   be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),  in mlx4_ib_handle_error_cqe()
    734   wqe_ctr = be16_to_cpu(cqe->wqe_index);  in mlx4_ib_poll_one()
    741   wqe_ctr = be16_to_cpu(cqe->wqe_index);  in mlx4_ib_poll_one()
    746   wqe_ctr = be16_to_cpu(cqe->wqe_index);  in mlx4_ib_poll_one()
    944   mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));  in __mlx4_ib_cq_clean()
|
D | mlx4_ib.h |
    791   void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
|
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx4/ |
D | en_tx.c |
    402   u16 wqe_index;  in mlx4_en_handle_err_cqe() (local)
    410   wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;  in mlx4_en_handle_err_cqe()
    411   tx_info = &ring->tx_info[wqe_index];  in mlx4_en_handle_err_cqe()
    414   wqe_index, desc_size);  in mlx4_en_handle_err_cqe()
    415   tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);  in mlx4_en_handle_err_cqe()
    477   new_index = be16_to_cpu(cqe->wqe_index) & size_mask;  in mlx4_en_process_tx_cq()
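The mlx4 Ethernet TX path shows the classic power-of-two ring idiom: the CQE's wqe_index is a free-running 16-bit counter, and ANDing it with size_mask (ring size minus one) yields the actual slot. A sketch with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t ring_size = 1024;   /* must be a power of two */
        uint16_t size_mask = ring_size - 1;
        uint16_t counter = 1030;     /* CQE value after be16_to_cpu() */

        uint16_t wqe_index = counter & size_mask;
        printf("CQE counter %u -> ring slot %u\n", counter, wqe_index);  /* 6 */
        return 0;
}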
|
D | en_netdev.c |
    1739   cq->buf->wqe_index = cpu_to_be16(0xffff);  in mlx4_en_start_port()
|
/linux-6.1.9/include/linux/mlx5/ |
D | device.h |
    644   __be16 wqe_index;  (member)
|
D | mlx5_ifc.h |
    3055   u8 wqe_index[0x10];  (member)
|
/linux-6.1.9/drivers/infiniband/hw/hns/ |
D | hns_roce_hw_v2.c |
    906    static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)  in hns_roce_free_srq_wqe() (argument)
    911    bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);  in hns_roce_free_srq_wqe()
    3614   int wqe_index;  in __hns_roce_v2_cq_clean() (local)
    3631   wqe_index = hr_reg_read(cqe, CQE_WQE_IDX);  in __hns_roce_v2_cq_clean()
    3632   hns_roce_free_srq_wqe(srq, wqe_index);  in __hns_roce_v2_cq_clean()
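hns manages SRQ indices with a bitmap rather than a linked free list: freeing a WQE is bitmap_clear() on its index, and allocation searches for a clear bit. A userspace sketch using one 64-bit word in place of the kernel bitmap helpers:

#include <stdio.h>
#include <stdint.h>

static uint64_t idx_bitmap;  /* bit n set == WQE n in use */

static int alloc_srq_wqe(void)
{
        for (int i = 0; i < 64; i++)
                if (!(idx_bitmap & (1ULL << i))) {
                        idx_bitmap |= 1ULL << i;
                        return i;
                }
        return -1;  /* SRQ full */
}

static void free_srq_wqe(uint32_t wqe_index)
{
        idx_bitmap &= ~(1ULL << wqe_index);  /* cf. bitmap_clear(..., wqe_index, 1) */
}

int main(void)
{
        int a = alloc_srq_wqe(), b = alloc_srq_wqe();

        free_srq_wqe((uint32_t)a);
        printf("allocated %d and %d, freed %d, next alloc: %d\n",
               a, b, a, alloc_srq_wqe());  /* reuses slot 0 */
        return 0;
}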
|
/linux-6.1.9/drivers/scsi/elx/libefc_sli/ |
D | sli4.h |
    983   __le16 wqe_index;  (member)
|