Lines matching refs:wqe (each entry gives the file's own line number, the matching source line, and the enclosing function)
14 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
18 struct rxe_send_wqe *wqe, int npsn) in retry_first_write_send() argument
23 int to_send = (wqe->dma.resid > qp->mtu) ? in retry_first_write_send()
24 qp->mtu : wqe->dma.resid; in retry_first_write_send()
26 qp->req.opcode = next_opcode(qp, wqe, in retry_first_write_send()
27 wqe->wr.opcode); in retry_first_write_send()
29 if (wqe->wr.send_flags & IB_SEND_INLINE) { in retry_first_write_send()
30 wqe->dma.resid -= to_send; in retry_first_write_send()
31 wqe->dma.sge_offset += to_send; in retry_first_write_send()
33 advance_dma_data(&wqe->dma, to_send); in retry_first_write_send()
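
The fragments above cover most of retry_first_write_send(); a hedged reconstruction, with the loop header and braces that are not part of the listing filled in as assumptions, looks like this:

/* Re-walk the first npsn packets of a partially acked write/send so the
 * DMA cursor lines up with what the responder already received.
 * Sketch: the for-loop and braces are assumptions around the listed lines.
 */
static void retry_first_write_send(struct rxe_qp *qp,
				   struct rxe_send_wqe *wqe, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			/* inline data lives in the wqe itself */
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
	}
}
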
40 struct rxe_send_wqe *wqe; in req_retry() local
58 wqe = queue_addr_from_index(qp->sq.queue, wqe_index); in req_retry()
59 mask = wr_opcode_mask(wqe->wr.opcode, qp); in req_retry()
61 if (wqe->state == wqe_state_posted) in req_retry()
64 if (wqe->state == wqe_state_done) in req_retry()
67 wqe->iova = (mask & WR_ATOMIC_MASK) ? in req_retry()
68 wqe->wr.wr.atomic.remote_addr : in req_retry()
70 wqe->wr.wr.rdma.remote_addr : in req_retry()
74 wqe->dma.resid = wqe->dma.length; in req_retry()
75 wqe->dma.cur_sge = 0; in req_retry()
76 wqe->dma.sge_offset = 0; in req_retry()
83 npsn = (qp->comp.psn - wqe->first_psn) & in req_retry()
85 retry_first_write_send(qp, wqe, npsn); in req_retry()
89 npsn = (wqe->dma.length - wqe->dma.resid) / in req_retry()
91 wqe->iova += npsn * qp->mtu; in req_retry()
95 wqe->state = wqe_state_posted; in req_retry()
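
req_retry() rewinds the requester to the consumer index and re-posts every WQE that has not completed. A condensed sketch of the loop body, reconstructed around the listed lines (the loop header, the reset condition, and the first-wqe special casing are assumptions; variables are as declared in the full function):

	for (wqe_index = cons; wqe_index != prod;
	     wqe_index = queue_next_index(q, wqe_index)) {
		wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;		/* never sent: nothing to undo */
		if (wqe->state == wqe_state_done)
			continue;	/* already completed: skip */

		/* restart the remote address from the original WR */
		wqe->iova = (mask & WR_ATOMIC_MASK) ?
				wqe->wr.wr.atomic.remote_addr :
			    (mask & WR_READ_OR_WRITE_MASK) ?
				wqe->wr.wr.rdma.remote_addr : 0;

		/* reset the DMA cursor unless this is a partially
		 * completed read at the head (it resumes where it stopped)
		 */
		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;
			if (mask & WR_WRITE_OR_SEND_MASK) {
				/* skip packets the responder already acked */
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, npsn);
			}
			if (mask & WR_READ_MASK) {
				/* resume a partial read past the data
				 * already received
				 */
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
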
113 struct rxe_send_wqe *wqe; in req_next_wqe() local
119 wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT); in req_next_wqe()
135 if (wqe && ((index != cons) || in req_next_wqe()
136 (wqe->state != wqe_state_posted))) { in req_next_wqe()
160 wqe = queue_addr_from_index(q, index); in req_next_wqe()
164 (wqe->state != wqe_state_processing))) in req_next_wqe()
167 wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp); in req_next_wqe()
168 return wqe; in req_next_wqe()
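
req_next_wqe() peeks at the send queue and only hands back a WQE the requester is allowed to work on. A hedged sketch around the listed lines; the send-queue draining transition between lines 119 and 160 is summarized rather than reproduced, and the QP_STATE_DRAIN/QP_STATE_DRAINED names are assumptions:

static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_queue *q = qp->sq.queue;
	struct rxe_send_wqe *wqe;
	unsigned int index = qp->req.wqe_index;
	unsigned int prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);

	/* (the real code transitions the QP to the drained state here,
	 * under qp->state_lock, once nothing is left in flight)
	 */

	if (index == prod)
		return NULL;		/* nothing new has been posted */

	wqe = queue_addr_from_index(q, index);

	/* while draining, only keep feeding a wqe already in flight */
	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}
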
179 static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe) in rxe_wqe_is_fenced() argument
185 if (wqe->wr.opcode == IB_WR_LOCAL_INV) in rxe_wqe_is_fenced()
193 return (wqe->wr.send_flags & IB_SEND_FENCE) && in rxe_wqe_is_fenced()
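
rxe_wqe_is_fenced() decides whether the WQE has to wait before being processed: a local invalidate fences against all earlier work on the send queue, and IB_SEND_FENCE fences against outstanding reads and atomics. A sketch consistent with the listed lines; the two return expressions are completed here as assumptions:

static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	/* a local invalidate waits until every earlier wqe on the
	 * send queue has completed
	 */
	if (wqe->wr.opcode == IB_WR_LOCAL_INV)
		return qp->req.wqe_index !=
			queue_get_consumer(qp->sq.queue,
					   QUEUE_TYPE_FROM_CLIENT);

	/* an explicitly fenced wqe waits until the outstanding read and
	 * atomic operations have drained back to the full budget
	 */
	return (wqe->wr.send_flags & IB_SEND_FENCE) &&
		atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
}
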
320 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, in next_opcode() argument
323 int fits = (wqe->dma.resid <= qp->mtu); in next_opcode()
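
next_opcode() maps the work request opcode plus the current position in a multi-packet message onto a wire opcode; `fits` is true when the remaining payload fits in one MTU. Roughly how the RC RDMA WRITE case uses it, as an illustration (the surrounding switch and the split into an RC-specific helper are assumptions; opcode constants are the standard ones from rdma/ib_pack.h):

	case IB_WR_RDMA_WRITE:
		/* continue a message already in flight, or start a new one */
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;
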
350 static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe) in check_init_depth() argument
354 if (wqe->has_rd_atomic) in check_init_depth()
362 wqe->has_rd_atomic = 1; in check_init_depth()
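
check_init_depth() charges a read/atomic WQE against the initiator depth (max_rd_atomic) exactly once; a sketch filling in the atomic bookkeeping around the listed lines as assumptions:

static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;	/* already holds a credit from a prior pass */

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	/* over the initiator depth: return the credit and retry later */
	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}
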
382 struct rxe_send_wqe *wqe, in init_req_packet() argument
388 struct rxe_send_wr *ibwr = &wqe->wr; in init_req_packet()
425 reth_set_va(pkt, wqe->iova); in init_req_packet()
426 reth_set_len(pkt, wqe->dma.resid); in init_req_packet()
436 atmeth_set_va(pkt, wqe->iova); in init_req_packet()
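
init_req_packet() builds the headers of the outgoing request; the listed lines are where the RETH (reads/writes) and ATMETH (atomics) take the target address and length from the WQE. A hedged excerpt around those lines; the rkey and compare/swap fields, and the mask checks, are assumptions, and `ibwr`, `pkt`, `opcode` are as declared in the function:

	if (pkt->mask & RXE_RETH_MASK) {
		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);		/* remote address */
		reth_set_len(pkt, wqe->dma.resid);	/* bytes still to move */
	}

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}
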
458 struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, in finish_packet() argument
468 if (wqe->wr.send_flags & IB_SEND_INLINE) { in finish_packet()
469 u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset]; in finish_packet()
473 wqe->dma.resid -= payload; in finish_packet()
474 wqe->dma.sge_offset += payload; in finish_packet()
476 err = copy_data(qp->pd, 0, &wqe->dma, in finish_packet()
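
finish_packet() copies the payload into the skb: inline sends come straight out of the buffer stashed in the WQE at post time, everything else is gathered from the SGE list through copy_data(). A sketch of that branch; the memcpy destination and the copy_data() arguments beyond the listed ones are assumptions:

	if (wqe->wr.send_flags & IB_SEND_INLINE) {
		/* inline payload was copied into the wqe at post time */
		u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

		memcpy(payload_addr(pkt), tmp, payload);

		wqe->dma.resid -= payload;
		wqe->dma.sge_offset += payload;
	} else {
		/* gather from the user's SGE list via the MR machinery */
		err = copy_data(qp->pd, 0, &wqe->dma, payload_addr(pkt),
				payload, RXE_FROM_MR_OBJ);
		if (err)
			return err;
	}
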
493 struct rxe_send_wqe *wqe, in update_wqe_state() argument
498 wqe->state = wqe_state_pending; in update_wqe_state()
500 wqe->state = wqe_state_processing; in update_wqe_state()
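
update_wqe_state() moves an RC WQE to wqe_state_pending once its last packet has gone out (it then waits for acks); while more packets remain it stays in wqe_state_processing. A sketch, with the mask check and qp_type test filled in as assumptions:

static void update_wqe_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			     struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;	/* wait for acks */
	} else {
		wqe->state = wqe_state_processing;	/* more packets left */
	}
}
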
505 struct rxe_send_wqe *wqe, in update_wqe_psn() argument
510 int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu; in update_wqe_psn()
517 wqe->first_psn = qp->req.psn; in update_wqe_psn()
518 wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK; in update_wqe_psn()
522 qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK; in update_wqe_psn()
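
update_wqe_psn() records the PSN range the WQE will occupy and advances the queue pair's next PSN: a read jumps the PSN past the whole expected response, everything else advances it by one. A sketch consistent with the listed lines; the zero-length case and the non-read branch are assumptions:

static void update_wqe_psn(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt, u32 payload)
{
	/* packets left to send, including this one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	if (num_pkt == 0)
		num_pkt = 1;		/* zero-length message */

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		/* the responder consumes one PSN per response packet */
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}
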
527 static void save_state(struct rxe_send_wqe *wqe, in save_state() argument
532 rollback_wqe->state = wqe->state; in save_state()
533 rollback_wqe->first_psn = wqe->first_psn; in save_state()
534 rollback_wqe->last_psn = wqe->last_psn; in save_state()
538 static void rollback_state(struct rxe_send_wqe *wqe, in rollback_state() argument
543 wqe->state = rollback_wqe->state; in rollback_state()
544 wqe->first_psn = rollback_wqe->first_psn; in rollback_state()
545 wqe->last_psn = rollback_wqe->last_psn; in rollback_state()
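
save_state() and rollback_state() bracket the transmit attempt: the requester snapshots the WQE state and the next PSN before updating them, and restores the snapshot if the packet cannot be sent. Completed as a sketch; the PSN arguments are assumptions based on the rollback_wqe/rollback_psn names:

static void save_state(struct rxe_send_wqe *wqe, struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe, u32 *rollback_psn)
{
	rollback_wqe->state	= wqe->state;
	rollback_wqe->first_psn	= wqe->first_psn;
	rollback_wqe->last_psn	= wqe->last_psn;
	*rollback_psn		= qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe, struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe, u32 rollback_psn)
{
	wqe->state	= rollback_wqe->state;
	wqe->first_psn	= rollback_wqe->first_psn;
	wqe->last_psn	= rollback_wqe->last_psn;
	qp->req.psn	= rollback_psn;
}
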
564 static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe) in rxe_do_local_ops() argument
566 u8 opcode = wqe->wr.opcode; in rxe_do_local_ops()
572 rkey = wqe->wr.ex.invalidate_rkey; in rxe_do_local_ops()
579 wqe->status = IB_WC_LOC_QP_OP_ERR; in rxe_do_local_ops()
584 ret = rxe_reg_fast_mr(qp, wqe); in rxe_do_local_ops()
586 wqe->status = IB_WC_LOC_QP_OP_ERR; in rxe_do_local_ops()
591 ret = rxe_bind_mw(qp, wqe); in rxe_do_local_ops()
593 wqe->status = IB_WC_MW_BIND_ERR; in rxe_do_local_ops()
599 wqe->status = IB_WC_LOC_QP_OP_ERR; in rxe_do_local_ops()
603 wqe->state = wqe_state_done; in rxe_do_local_ops()
604 wqe->status = IB_WC_SUCCESS; in rxe_do_local_ops()
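
rxe_do_local_ops() executes work requests that never touch the wire (local invalidate, fast-register MR, memory-window bind) and completes the WQE on the spot, since no ack will ever arrive for it. A sketch of the dispatch; the MR/MW helper calls between the listed lines and the index advance at the end are assumptions:

static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	u8 opcode = wqe->wr.opcode;
	u32 rkey;
	int ret;

	switch (opcode) {
	case IB_WR_LOCAL_INV:
		rkey = wqe->wr.ex.invalidate_rkey;
		ret = rkey_is_mw(rkey) ? rxe_invalidate_mw(qp, rkey) :
					 rxe_invalidate_mr(qp, rkey);
		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_REG_MR:
		ret = rxe_reg_fast_mr(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_BIND_MW:
		ret = rxe_bind_mw(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_MW_BIND_ERR;
			return ret;
		}
		break;
	default:
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		return -EINVAL;
	}

	/* no ack is coming for a local op, so complete it immediately */
	wqe->state = wqe_state_done;
	wqe->status = IB_WC_SUCCESS;
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);

	return 0;
}
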
622 struct rxe_send_wqe *wqe; in rxe_requester() local
642 wqe = req_next_wqe(qp); in rxe_requester()
643 if (wqe) in rxe_requester()
674 wqe = req_next_wqe(qp); in rxe_requester()
675 if (unlikely(!wqe)) in rxe_requester()
678 if (rxe_wqe_is_fenced(qp, wqe)) { in rxe_requester()
683 if (wqe->mask & WR_LOCAL_OP_MASK) { in rxe_requester()
684 err = rxe_do_local_ops(qp, wqe); in rxe_requester()
705 opcode = next_opcode(qp, wqe, wqe->wr.opcode); in rxe_requester()
707 wqe->status = IB_WC_LOC_QP_OP_ERR; in rxe_requester()
713 if (check_init_depth(qp, wqe)) in rxe_requester()
718 payload = (mask & RXE_WRITE_OR_SEND_MASK) ? wqe->dma.resid : 0; in rxe_requester()
728 wqe->first_psn = qp->req.psn; in rxe_requester()
729 wqe->last_psn = qp->req.psn; in rxe_requester()
734 wqe->state = wqe_state_done; in rxe_requester()
735 wqe->status = IB_WC_SUCCESS; in rxe_requester()
747 pkt.wqe = wqe; in rxe_requester()
752 wqe->status = IB_WC_LOC_QP_OP_ERR; in rxe_requester()
756 skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt); in rxe_requester()
759 wqe->status = IB_WC_LOC_QP_OP_ERR; in rxe_requester()
765 err = finish_packet(qp, av, wqe, &pkt, skb, payload); in rxe_requester()
769 wqe->status = IB_WC_LOC_PROT_ERR; in rxe_requester()
771 wqe->status = IB_WC_LOC_QP_OP_ERR; in rxe_requester()
787 save_state(wqe, qp, &rollback_wqe, &rollback_psn); in rxe_requester()
788 update_wqe_state(qp, wqe, &pkt); in rxe_requester()
789 update_wqe_psn(qp, wqe, &pkt, payload); in rxe_requester()
795 rollback_state(wqe, qp, &rollback_wqe, rollback_psn); in rxe_requester()
802 wqe->status = IB_WC_LOC_QP_OP_ERR; in rxe_requester()
818 wqe->state = wqe_state_error; in rxe_requester()
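
Putting the listed fragments together, rxe_requester() is the send-side work loop: pick the next WQE, honour fences, run local ops inline, choose a wire opcode, check the read/atomic budget, build and transmit the packet, and roll back on a transmit failure. A heavily condensed, hedged outline of one iteration; QP reset/error handling, retransmit triggering, the UD oversize-send case, and the exact labels and error paths between the listed lines are omitted or approximated:

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;			/* nothing ready to send */

	if (rxe_wqe_is_fenced(qp, wqe)) {
		qp->req.wait_fence = 1;		/* wait for earlier work */
		goto exit;
	}

	if (wqe->mask & WR_LOCAL_OP_MASK) {
		err = rxe_do_local_ops(qp, wqe);	/* completes the wqe */
		if (unlikely(err))
			goto err;
		goto done;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	mask = rxe_opcode[opcode].mask;
	if (mask & RXE_READ_OR_ATOMIC_MASK && check_init_depth(qp, wqe))
		goto exit;			/* read/atomic budget used up */

	payload = (mask & RXE_WRITE_OR_SEND_MASK) ? wqe->dma.resid : 0;

	skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	err = finish_packet(qp, av, wqe, &pkt, skb, payload);
	if (unlikely(err)) {
		wqe->status = (err == -EFAULT) ? IB_WC_LOC_PROT_ERR :
						 IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	/* snapshot, update, transmit; undo the update if the send fails */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);

	err = rxe_xmit_packet(qp, &pkt, skb);
	if (err) {
		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
		goto exit;			/* try again later */
	}
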