Lines matching references to qhp. Each match shows the source line number, the matched line, and the enclosing function; the trailing "argument"/"local" tags mark where qhp enters scope.
87 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state) in set_state() argument
90 spin_lock_irqsave(&qhp->lock, flag); in set_state()
91 qhp->attr.state = state; in set_state()
92 spin_unlock_irqrestore(&qhp->lock, flag); in set_state()
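
Taken together, the matches at lines 87-92 show set_state() in full: attr.state is only ever written under qhp->lock. A minimal reconstruction (the flag local is implied by the irqsave/irqrestore pair):

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
        unsigned long flag;

        /* Serialize attr.state updates with other holders of qhp->lock. */
        spin_lock_irqsave(&qhp->lock, flag);
        qhp->attr.state = state;
        spin_unlock_irqrestore(&qhp->lock, flag);
}
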
690 static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr) in post_write_cmpl() argument
693 qhp->sq_sig_all; in post_write_cmpl()
695 qhp->sq_sig_all; in post_write_cmpl()
706 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + in post_write_cmpl()
707 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); in post_write_cmpl()
708 build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16); in post_write_cmpl()
711 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; in post_write_cmpl()
713 swsqe->idx = qhp->wq.sq.pidx; in post_write_cmpl()
720 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]); in post_write_cmpl()
724 write_wrid = qhp->wq.sq.pidx; in post_write_cmpl()
727 qhp->wq.sq.in_use++; in post_write_cmpl()
728 if (++qhp->wq.sq.pidx == qhp->wq.sq.size) in post_write_cmpl()
729 qhp->wq.sq.pidx = 0; in post_write_cmpl()
732 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; in post_write_cmpl()
737 swsqe->idx = qhp->wq.sq.pidx; in post_write_cmpl()
744 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]); in post_write_cmpl()
749 wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx; in post_write_cmpl()
753 t4_sq_produce(&qhp->wq, len16); in post_write_cmpl()
756 t4_ring_sq_db(&qhp->wq, idx, wqe); in post_write_cmpl()
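
The post_write_cmpl() matches sketch its shape: the helper appears to fuse an RDMA WRITE with a completing SEND, consuming two software SQ slots before ringing the doorbell once. A skeleton assembled strictly from the matches above; wqe, swsqe, write_wrid, len16 and idx are locals assumed from context, and the field setup between the matches (including the SGE-timestamp reads at lines 720 and 744) is elided:

wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
                      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16);

swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];     /* slot 1: the WRITE */
swsqe->idx = qhp->wq.sq.pidx;
write_wrid = qhp->wq.sq.pidx;

qhp->wq.sq.in_use++;
if (++qhp->wq.sq.pidx == qhp->wq.sq.size)       /* advance pidx, wrapping */
        qhp->wq.sq.pidx = 0;

swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];     /* slot 2: the SEND */
swsqe->idx = qhp->wq.sq.pidx;
wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx;    /* link SEND wrid into the WR */

t4_sq_produce(&qhp->wq, len16);
t4_ring_sq_db(&qhp->wq, idx, wqe);
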
759 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, in build_rdma_recv() argument
764 ret = build_isgl((__be64 *)qhp->wq.rq.queue, in build_rdma_recv()
765 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size], in build_rdma_recv()
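
build_rdma_recv() hands build_isgl() both the RQ ring base and its one-past-the-end address, letting the ISGL builder bounds-check as it writes. A fragment; the arguments after the two bounds are assumptions, not shown in the matches:

ret = build_isgl((__be64 *)qhp->wq.rq.queue,
                 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
                 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
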
912 static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc) in ring_kernel_sq_db() argument
916 xa_lock_irqsave(&qhp->rhp->qps, flags); in ring_kernel_sq_db()
917 spin_lock(&qhp->lock); in ring_kernel_sq_db()
918 if (qhp->rhp->db_state == NORMAL) in ring_kernel_sq_db()
919 t4_ring_sq_db(&qhp->wq, inc, NULL); in ring_kernel_sq_db()
921 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_sq_db()
922 qhp->wq.sq.wq_pidx_inc += inc; in ring_kernel_sq_db()
924 spin_unlock(&qhp->lock); in ring_kernel_sq_db()
925 xa_unlock_irqrestore(&qhp->rhp->qps, flags); in ring_kernel_sq_db()
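
ring_kernel_sq_db() is reconstructible from its matches nearly verbatim, and it documents the driver's doorbell flow-control scheme: the doorbell is rung directly only while the device-wide db_state is NORMAL; otherwise the QP is parked on the flow-control list and the producer-index increment is banked in wq_pidx_inc for later replay. A sketch (the flags local and the return value are assumptions):

static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
{
        unsigned long flags;

        /* Lock order: device qps xarray first, then the QP lock. */
        xa_lock_irqsave(&qhp->rhp->qps, flags);
        spin_lock(&qhp->lock);
        if (qhp->rhp->db_state == NORMAL) {
                t4_ring_sq_db(&qhp->wq, inc, NULL);
        } else {
                /* Doorbells stalled: queue for flow control and bank
                 * the increment until the db state recovers. */
                add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
                qhp->wq.sq.wq_pidx_inc += inc;
        }
        spin_unlock(&qhp->lock);
        xa_unlock_irqrestore(&qhp->rhp->qps, flags);
        return 0;
}

ring_kernel_rq_db() (lines 929-942 below) mirrors this for the RQ, banking into wq.rq.wq_pidx_inc instead.
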
929 static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) in ring_kernel_rq_db() argument
933 xa_lock_irqsave(&qhp->rhp->qps, flags); in ring_kernel_rq_db()
934 spin_lock(&qhp->lock); in ring_kernel_rq_db()
935 if (qhp->rhp->db_state == NORMAL) in ring_kernel_rq_db()
936 t4_ring_rq_db(&qhp->wq, inc, NULL); in ring_kernel_rq_db()
938 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_rq_db()
939 qhp->wq.rq.wq_pidx_inc += inc; in ring_kernel_rq_db()
941 spin_unlock(&qhp->lock); in ring_kernel_rq_db()
942 xa_unlock_irqrestore(&qhp->rhp->qps, flags); in ring_kernel_rq_db()
979 static int complete_sq_drain_wr(struct c4iw_qp *qhp, in complete_sq_drain_wr() argument
988 schp = to_c4iw_cq(qhp->ibqp.send_cq); in complete_sq_drain_wr()
1001 CQE_QPID_V(qhp->wq.sq.qid)); in complete_sq_drain_wr()
1018 static int complete_sq_drain_wrs(struct c4iw_qp *qhp, in complete_sq_drain_wrs() argument
1025 ret = complete_sq_drain_wr(qhp, wr); in complete_sq_drain_wrs()
1035 static void complete_rq_drain_wr(struct c4iw_qp *qhp, in complete_rq_drain_wr() argument
1043 rchp = to_c4iw_cq(qhp->ibqp.recv_cq); in complete_rq_drain_wr()
1052 CQE_QPID_V(qhp->wq.sq.qid)); in complete_rq_drain_wr()
1068 static void complete_rq_drain_wrs(struct c4iw_qp *qhp, in complete_rq_drain_wrs() argument
1072 complete_rq_drain_wr(qhp, wr); in complete_rq_drain_wrs()
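
The drain helpers take over once wq.flushed is set: rather than touching hardware, they complete the WR in software by inserting a CQE directly into the send or receive CQ. Notably, both the SQ path (line 1001) and the RQ path (line 1052) stamp the software CQE with the SQ qid. A fragment, with the local cqe and the remaining header bits assumed:

/* cqe is a local struct t4_cqe in both helpers (assumed from context). */
cqe.header = cpu_to_be32(CQE_QPID_V(qhp->wq.sq.qid)
                         /* | status/opcode/type/SWCQE bits, elided */);
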
1084 struct c4iw_qp *qhp; in c4iw_post_send() local
1092 qhp = to_c4iw_qp(ibqp); in c4iw_post_send()
1093 rhp = qhp->rhp; in c4iw_post_send()
1094 spin_lock_irqsave(&qhp->lock, flag); in c4iw_post_send()
1100 if (qhp->wq.flushed) { in c4iw_post_send()
1101 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
1102 err = complete_sq_drain_wrs(qhp, wr, bad_wr); in c4iw_post_send()
1105 num_wrs = t4_sq_avail(&qhp->wq); in c4iw_post_send()
1107 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
1121 if (qhp->rhp->rdev.lldi.write_cmpl_support && in c4iw_post_send()
1122 CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >= in c4iw_post_send()
1131 post_write_cmpl(qhp, wr); in c4iw_post_send()
1132 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
1142 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + in c4iw_post_send()
1143 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); in c4iw_post_send()
1148 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all) in c4iw_post_send()
1150 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; in c4iw_post_send()
1161 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
1173 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
1189 if (!qhp->wq.sq.oldest_read) in c4iw_post_send()
1190 qhp->wq.sq.oldest_read = swsqe; in c4iw_post_send()
1203 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), in c4iw_post_send()
1229 swsqe->idx = qhp->wq.sq.pidx; in c4iw_post_send()
1232 qhp->sq_sig_all; in c4iw_post_send()
1241 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); in c4iw_post_send()
1244 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, in c4iw_post_send()
1248 t4_sq_produce(&qhp->wq, len16); in c4iw_post_send()
1252 t4_ring_sq_db(&qhp->wq, idx, wqe); in c4iw_post_send()
1253 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
1255 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
1256 ring_kernel_sq_db(qhp, idx); in c4iw_post_send()
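
Reading the c4iw_post_send() matches top to bottom gives the posting algorithm: check the flushed flag (drain path), reserve ring space via t4_sq_avail(), build each WR in place at the producer index, record it in the software SQ, then produce and ring. A condensed sketch of the skeleton; opcode dispatch, the write_cmpl fast path at lines 1121-1132, and error handling are elided, and the locals are assumed:

spin_lock_irqsave(&qhp->lock, flag);
if (qhp->wq.flushed) {
        /* QP already flushed: complete in software (lines 1100-1102). */
        spin_unlock_irqrestore(&qhp->lock, flag);
        err = complete_sq_drain_wrs(qhp, wr, bad_wr);
        return err;
}
num_wrs = t4_sq_avail(&qhp->wq);        /* free SQ slots */
while (wr) {                            /* one iteration per WR */
        wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
                              qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
        swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
        /* opcode-specific builders: build_rdma_send(),
         * build_rdma_write(), build_memreg(), ... */
        swsqe->idx = qhp->wq.sq.pidx;
        init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
        t4_sq_produce(&qhp->wq, len16);
        wr = wr->next;
}
t4_ring_sq_db(&qhp->wq, idx, wqe);      /* or defer via
                                         * ring_kernel_sq_db(qhp, idx) */
spin_unlock_irqrestore(&qhp->lock, flag);
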
1265 struct c4iw_qp *qhp; in c4iw_post_receive() local
1272 qhp = to_c4iw_qp(ibqp); in c4iw_post_receive()
1273 spin_lock_irqsave(&qhp->lock, flag); in c4iw_post_receive()
1279 if (qhp->wq.flushed) { in c4iw_post_receive()
1280 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
1281 complete_rq_drain_wrs(qhp, wr); in c4iw_post_receive()
1284 num_wrs = t4_rq_avail(&qhp->wq); in c4iw_post_receive()
1286 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
1296 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue + in c4iw_post_receive()
1297 qhp->wq.rq.wq_pidx * in c4iw_post_receive()
1300 err = build_rdma_recv(qhp, wqe, wr, &len16); in c4iw_post_receive()
1308 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; in c4iw_post_receive()
1310 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts = in c4iw_post_receive()
1312 qhp->rhp->rdev.lldi.ports[0]); in c4iw_post_receive()
1313 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time = in c4iw_post_receive()
1319 wqe->recv.wrid = qhp->wq.rq.pidx; in c4iw_post_receive()
1325 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx); in c4iw_post_receive()
1326 t4_rq_produce(&qhp->wq, len16); in c4iw_post_receive()
1331 if (!qhp->rhp->rdev.status_page->db_off) { in c4iw_post_receive()
1332 t4_ring_rq_db(&qhp->wq, idx, wqe); in c4iw_post_receive()
1333 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
1335 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
1336 ring_kernel_rq_db(qhp, idx); in c4iw_post_receive()
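
The receive path ends with an explicit doorbell-stall check that the send-path matches only imply: the doorbell is rung inline only when the status page says doorbells are on. Reconstructed from lines 1331-1336, with the else inferred from the two unlock orderings:

if (!qhp->rhp->rdev.status_page->db_off) {
        t4_ring_rq_db(&qhp->wq, idx, wqe);      /* ring while locked */
        spin_unlock_irqrestore(&qhp->lock, flag);
} else {
        spin_unlock_irqrestore(&qhp->lock, flag);
        ring_kernel_rq_db(qhp, idx);            /* may bank the increment */
}
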
1560 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, in post_terminate() argument
1567 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, in post_terminate()
1568 qhp->ep->hwtid); in post_terminate()
1570 skb = skb_dequeue(&qhp->ep->com.ep_skb_list); in post_terminate()
1574 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); in post_terminate()
1579 FW_WR_FLOWID_V(qhp->ep->hwtid) | in post_terminate()
1585 if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) { in post_terminate()
1586 term->layer_etype = qhp->attr.layer_etype; in post_terminate()
1587 term->ecode = qhp->attr.ecode; in post_terminate()
1590 c4iw_ofld_send(&qhp->rhp->rdev, skb); in post_terminate()
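
post_terminate() is an error-path sender, so it draws a pre-allocated skb from the endpoint's reserve list rather than allocating (line 1570). Condensed from the matches, with the guard on skb and the WR/term setup between them assumed:

skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
if (!skb)                               /* reserve exhausted (assumed guard) */
        return;
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
/* FW_RI_WR setup with FW_WR_FLOWID_V(qhp->ep->hwtid); term points at
 * the TERMINATE payload (elided). */
if (qhp->attr.layer_etype == (LAYER_MPA | DDP_LLP)) {
        term->layer_etype = qhp->attr.layer_etype;
        term->ecode = qhp->attr.ecode;
}
/* else branch (default layer/ecode) not matched by qhp, elided */
c4iw_ofld_send(&qhp->rhp->rdev, skb);
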
1596 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, in __flush_qp() argument
1603 pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp); in __flush_qp()
1609 spin_lock(&qhp->lock); in __flush_qp()
1611 if (qhp->wq.flushed) { in __flush_qp()
1612 spin_unlock(&qhp->lock); in __flush_qp()
1618 qhp->wq.flushed = 1; in __flush_qp()
1619 t4_set_wq_in_error(&qhp->wq, 0); in __flush_qp()
1621 c4iw_flush_hw_cq(rchp, qhp); in __flush_qp()
1622 if (!qhp->srq) { in __flush_qp()
1623 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); in __flush_qp()
1624 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); in __flush_qp()
1628 c4iw_flush_hw_cq(schp, qhp); in __flush_qp()
1629 sq_flushed = c4iw_flush_sq(qhp); in __flush_qp()
1631 spin_unlock(&qhp->lock); in __flush_qp()
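
__flush_qp() shows the flush ordering: mark the WQ exactly once, drain the hardware RCQ, flush the software RQ only when the QP owns one (SRQ-attached QPs do not), then do the same for the send side. Reconstructed from the matches; the CQ locks taken around this section do not reference qhp and are elided:

spin_lock(&qhp->lock);
if (qhp->wq.flushed) {                  /* flush exactly once */
        spin_unlock(&qhp->lock);
        return;                         /* (CQ unlocks elided) */
}
qhp->wq.flushed = 1;
t4_set_wq_in_error(&qhp->wq, 0);

c4iw_flush_hw_cq(rchp, qhp);            /* drain hardware RCQEs first */
if (!qhp->srq) {                        /* no own RQ when SRQ-attached */
        c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
        rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
}
c4iw_flush_hw_cq(schp, qhp);            /* then the send side */
sq_flushed = c4iw_flush_sq(qhp);
spin_unlock(&qhp->lock);
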
1660 static void flush_qp(struct c4iw_qp *qhp) in flush_qp() argument
1665 rchp = to_c4iw_cq(qhp->ibqp.recv_cq); in flush_qp()
1666 schp = to_c4iw_cq(qhp->ibqp.send_cq); in flush_qp()
1668 if (qhp->ibqp.uobject) { in flush_qp()
1671 if (qhp->wq.flushed) in flush_qp()
1674 qhp->wq.flushed = 1; in flush_qp()
1675 t4_set_wq_in_error(&qhp->wq, 0); in flush_qp()
1689 __flush_qp(qhp, rchp, schp); in flush_qp()
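
flush_qp() splits on QP ownership (line 1668): user-mapped QPs are only marked, since their rings live in userspace, while kernel QPs take the full __flush_qp() path. Skeleton from the matches; the user-path CQ notification lines do not reference qhp and are elided:

rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
schp = to_c4iw_cq(qhp->ibqp.send_cq);

if (qhp->ibqp.uobject) {                /* user-mapped QP */
        if (qhp->wq.flushed)
                return;
        qhp->wq.flushed = 1;
        t4_set_wq_in_error(&qhp->wq, 0);
        /* ...mark/notify the user CQs, elided... */
        return;
}
__flush_qp(qhp, rchp, schp);
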
1692 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, in rdma_fini() argument
1699 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid); in rdma_fini()
1719 qhp->ep->hwtid, qhp->wq.sq.qid, __func__); in rdma_fini()
1749 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) in rdma_init() argument
1755 pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp, in rdma_init()
1756 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord); in rdma_init()
1763 ret = alloc_ird(rhp, qhp->attr.max_ird); in rdma_init()
1765 qhp->attr.max_ird = 0; in rdma_init()
1769 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); in rdma_init()
1776 FW_WR_FLOWID_V(qhp->ep->hwtid) | in rdma_init()
1779 wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp; in rdma_init()
1783 FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) | in rdma_init()
1784 FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type); in rdma_init()
1786 if (qhp->attr.mpa_attr.recv_marker_enabled) in rdma_init()
1788 if (qhp->attr.mpa_attr.xmit_marker_enabled) in rdma_init()
1790 if (qhp->attr.mpa_attr.crc_enabled) in rdma_init()
1796 if (!qhp->ibqp.uobject) in rdma_init()
1799 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq)); in rdma_init()
1800 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd); in rdma_init()
1801 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid); in rdma_init()
1802 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid); in rdma_init()
1803 if (qhp->srq) { in rdma_init()
1805 qhp->srq->idx); in rdma_init()
1807 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid); in rdma_init()
1808 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size); in rdma_init()
1809 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr - in rdma_init()
1812 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq); in rdma_init()
1813 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq); in rdma_init()
1814 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord); in rdma_init()
1815 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird); in rdma_init()
1816 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq); in rdma_init()
1817 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq); in rdma_init()
1818 if (qhp->attr.mpa_attr.initiator) in rdma_init()
1819 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); in rdma_init()
1821 ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp, in rdma_init()
1822 qhp->ep->hwtid, qhp->wq.sq.qid, __func__); in rdma_init()
1826 free_ird(rhp, qhp->attr.max_ird); in rdma_init()
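
Within rdma_init()'s FW init WR, the RQ programming depends on whether the QP is attached to an SRQ (lines 1803-1809). Reconstructed below; the FW_RI_INIT_RQEQID_SRQ flag and the hwrqaddr subtrahend are assumptions filling the truncated matches:

if (qhp->srq) {
        /* rq_eqid carries the SRQ index (flag assumed). */
        wqe->u.init.rq_eqid = cpu_to_be32(FW_RI_INIT_RQEQID_SRQ |
                                          qhp->srq->idx);
} else {
        wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
        wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
        wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
                                           rhp->rdev.lldi.vr->rq.start);
                                        /* subtrahend assumed */
}
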
1832 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, in c4iw_modify_qp() argument
1838 struct c4iw_qp_attributes newattr = qhp->attr; in c4iw_modify_qp()
1846 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, in c4iw_modify_qp()
1849 mutex_lock(&qhp->mutex); in c4iw_modify_qp()
1853 if (qhp->attr.state != C4IW_QP_STATE_IDLE) { in c4iw_modify_qp()
1877 qhp->attr = newattr; in c4iw_modify_qp()
1881 ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc); in c4iw_modify_qp()
1885 ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc); in c4iw_modify_qp()
1891 if (qhp->attr.state == attrs->next_state) in c4iw_modify_qp()
1894 switch (qhp->attr.state) { in c4iw_modify_qp()
1906 qhp->attr.mpa_attr = attrs->mpa_attr; in c4iw_modify_qp()
1907 qhp->attr.llp_stream_handle = attrs->llp_stream_handle; in c4iw_modify_qp()
1908 qhp->ep = qhp->attr.llp_stream_handle; in c4iw_modify_qp()
1909 set_state(qhp, C4IW_QP_STATE_RTS); in c4iw_modify_qp()
1917 c4iw_get_ep(&qhp->ep->com); in c4iw_modify_qp()
1918 ret = rdma_init(rhp, qhp); in c4iw_modify_qp()
1923 set_state(qhp, C4IW_QP_STATE_ERROR); in c4iw_modify_qp()
1924 flush_qp(qhp); in c4iw_modify_qp()
1934 t4_set_wq_in_error(&qhp->wq, 0); in c4iw_modify_qp()
1935 set_state(qhp, C4IW_QP_STATE_CLOSING); in c4iw_modify_qp()
1936 ep = qhp->ep; in c4iw_modify_qp()
1940 c4iw_get_ep(&qhp->ep->com); in c4iw_modify_qp()
1942 ret = rdma_fini(rhp, qhp, ep); in c4iw_modify_qp()
1947 t4_set_wq_in_error(&qhp->wq, 0); in c4iw_modify_qp()
1948 set_state(qhp, C4IW_QP_STATE_TERMINATE); in c4iw_modify_qp()
1949 qhp->attr.layer_etype = attrs->layer_etype; in c4iw_modify_qp()
1950 qhp->attr.ecode = attrs->ecode; in c4iw_modify_qp()
1951 ep = qhp->ep; in c4iw_modify_qp()
1957 terminate = qhp->attr.send_term; in c4iw_modify_qp()
1958 ret = rdma_fini(rhp, qhp, ep); in c4iw_modify_qp()
1964 t4_set_wq_in_error(&qhp->wq, 0); in c4iw_modify_qp()
1965 set_state(qhp, C4IW_QP_STATE_ERROR); in c4iw_modify_qp()
1968 ep = qhp->ep; in c4iw_modify_qp()
1969 c4iw_get_ep(&qhp->ep->com); in c4iw_modify_qp()
1983 if (!internal && (qhp->ibqp.uobject || attrs->next_state != in c4iw_modify_qp()
1990 flush_qp(qhp); in c4iw_modify_qp()
1991 set_state(qhp, C4IW_QP_STATE_IDLE); in c4iw_modify_qp()
1992 qhp->attr.llp_stream_handle = NULL; in c4iw_modify_qp()
1993 c4iw_put_ep(&qhp->ep->com); in c4iw_modify_qp()
1994 qhp->ep = NULL; in c4iw_modify_qp()
1995 wake_up(&qhp->wait); in c4iw_modify_qp()
2009 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) { in c4iw_modify_qp()
2013 set_state(qhp, C4IW_QP_STATE_IDLE); in c4iw_modify_qp()
2023 pr_err("%s in a bad state %d\n", __func__, qhp->attr.state); in c4iw_modify_qp()
2030 pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep, in c4iw_modify_qp()
2031 qhp->wq.sq.qid); in c4iw_modify_qp()
2034 qhp->attr.llp_stream_handle = NULL; in c4iw_modify_qp()
2036 ep = qhp->ep; in c4iw_modify_qp()
2037 qhp->ep = NULL; in c4iw_modify_qp()
2038 set_state(qhp, C4IW_QP_STATE_ERROR); in c4iw_modify_qp()
2041 flush_qp(qhp); in c4iw_modify_qp()
2042 wake_up(&qhp->wait); in c4iw_modify_qp()
2044 mutex_unlock(&qhp->mutex); in c4iw_modify_qp()
2047 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL); in c4iw_modify_qp()
2066 pr_debug("exit state %d\n", qhp->attr.state); in c4iw_modify_qp()
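
Stitching the c4iw_modify_qp() matches together yields the iWARP QP state machine, driven under qhp->mutex: IDLE->RTS installs the MPA attributes and endpoint and posts rdma_init(); RTS->CLOSING marks the WQ in error and posts rdma_fini(); RTS->TERMINATE records layer_etype/ecode and may send a TERMINATE (attr.send_term); the ERROR paths disassociate the ep, flush, and wake waiters; CLOSING->IDLE is refused while the SQ or RQ is non-empty. A compressed sketch with cases merged and error handling heavily elided; the errno values are assumptions:

mutex_lock(&qhp->mutex);
switch (qhp->attr.state) {
case C4IW_QP_STATE_IDLE:                /* -> RTS */
        qhp->attr.mpa_attr = attrs->mpa_attr;
        qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
        qhp->ep = qhp->attr.llp_stream_handle;
        set_state(qhp, C4IW_QP_STATE_RTS);
        c4iw_get_ep(&qhp->ep->com);
        ret = rdma_init(rhp, qhp);
        break;
case C4IW_QP_STATE_RTS:                 /* -> CLOSING/TERMINATE/ERROR */
        t4_set_wq_in_error(&qhp->wq, 0);
        set_state(qhp, attrs->next_state);      /* simplified */
        ret = rdma_fini(rhp, qhp, qhp->ep);     /* not on -> ERROR */
        break;
case C4IW_QP_STATE_CLOSING:             /* -> IDLE, only once drained */
        if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
                ret = -EINVAL;                  /* errno assumed */
                break;
        }
        set_state(qhp, C4IW_QP_STATE_IDLE);
        break;
default:
        pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
        ret = -EINVAL;                          /* errno assumed */
}
mutex_unlock(&qhp->mutex);
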
2073 struct c4iw_qp *qhp; in c4iw_destroy_qp() local
2077 qhp = to_c4iw_qp(ib_qp); in c4iw_destroy_qp()
2078 rhp = qhp->rhp; in c4iw_destroy_qp()
2079 ucontext = qhp->ucontext; in c4iw_destroy_qp()
2082 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE) in c4iw_destroy_qp()
2083 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); in c4iw_destroy_qp()
2085 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); in c4iw_destroy_qp()
2086 wait_event(qhp->wait, !qhp->ep); in c4iw_destroy_qp()
2089 __xa_erase(&rhp->qps, qhp->wq.sq.qid); in c4iw_destroy_qp()
2090 if (!list_empty(&qhp->db_fc_entry)) in c4iw_destroy_qp()
2091 list_del_init(&qhp->db_fc_entry); in c4iw_destroy_qp()
2093 free_ird(rhp, qhp->attr.max_ird); in c4iw_destroy_qp()
2097 wait_for_completion(&qhp->qp_rel_comp); in c4iw_destroy_qp()
2099 pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid); in c4iw_destroy_qp()
2100 pr_debug("qhp %p ucontext %p\n", qhp, ucontext); in c4iw_destroy_qp()
2102 destroy_qp(&rhp->rdev, &qhp->wq, in c4iw_destroy_qp()
2103 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq); in c4iw_destroy_qp()
2105 c4iw_put_wr_wait(qhp->wr_waitp); in c4iw_destroy_qp()
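
Destroy follows a strict order visible in the matches: drive the QP to ERROR (via the TERMINATE-aware internal path if needed), wait for the endpoint to drop, unhook the QP from the device xarray and flow-control list, release IRD resources, wait for the last reference, and only then free the work queues. Condensed:

attrs.next_state = C4IW_QP_STATE_ERROR;         /* setup assumed */
if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
        c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
else
        c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
wait_event(qhp->wait, !qhp->ep);        /* ep disassociation complete */

__xa_erase(&rhp->qps, qhp->wq.sq.qid);  /* under the qps xa lock (elided) */
if (!list_empty(&qhp->db_fc_entry))
        list_del_init(&qhp->db_fc_entry);
free_ird(rhp, qhp->attr.max_ird);

wait_for_completion(&qhp->qp_rel_comp); /* last reference gone */
destroy_qp(&rhp->rdev, &qhp->wq,
           ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
c4iw_put_wr_wait(qhp->wr_waitp);
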
2114 struct c4iw_qp *qhp = to_c4iw_qp(qp); in c4iw_create_qp() local
2153 qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); in c4iw_create_qp()
2154 if (!qhp->wr_waitp) in c4iw_create_qp()
2157 qhp->wq.sq.size = sqsize; in c4iw_create_qp()
2158 qhp->wq.sq.memsize = in c4iw_create_qp()
2160 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64); in c4iw_create_qp()
2161 qhp->wq.sq.flush_cidx = -1; in c4iw_create_qp()
2163 qhp->wq.rq.size = rqsize; in c4iw_create_qp()
2164 qhp->wq.rq.memsize = in c4iw_create_qp()
2166 sizeof(*qhp->wq.rq.queue); in c4iw_create_qp()
2170 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); in c4iw_create_qp()
2172 qhp->wq.rq.memsize = in c4iw_create_qp()
2173 roundup(qhp->wq.rq.memsize, PAGE_SIZE); in c4iw_create_qp()
2176 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, in c4iw_create_qp()
2178 qhp->wr_waitp, !attrs->srq); in c4iw_create_qp()
2186 qhp->rhp = rhp; in c4iw_create_qp()
2187 qhp->attr.pd = php->pdid; in c4iw_create_qp()
2188 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid; in c4iw_create_qp()
2189 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid; in c4iw_create_qp()
2190 qhp->attr.sq_num_entries = attrs->cap.max_send_wr; in c4iw_create_qp()
2191 qhp->attr.sq_max_sges = attrs->cap.max_send_sge; in c4iw_create_qp()
2192 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge; in c4iw_create_qp()
2194 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr; in c4iw_create_qp()
2195 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge; in c4iw_create_qp()
2197 qhp->attr.state = C4IW_QP_STATE_IDLE; in c4iw_create_qp()
2198 qhp->attr.next_state = C4IW_QP_STATE_IDLE; in c4iw_create_qp()
2199 qhp->attr.enable_rdma_read = 1; in c4iw_create_qp()
2200 qhp->attr.enable_rdma_write = 1; in c4iw_create_qp()
2201 qhp->attr.enable_bind = 1; in c4iw_create_qp()
2202 qhp->attr.max_ord = 0; in c4iw_create_qp()
2203 qhp->attr.max_ird = 0; in c4iw_create_qp()
2204 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; in c4iw_create_qp()
2205 spin_lock_init(&qhp->lock); in c4iw_create_qp()
2206 mutex_init(&qhp->mutex); in c4iw_create_qp()
2207 init_waitqueue_head(&qhp->wait); in c4iw_create_qp()
2208 init_completion(&qhp->qp_rel_comp); in c4iw_create_qp()
2209 refcount_set(&qhp->qp_refcnt, 1); in c4iw_create_qp()
2211 ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL); in c4iw_create_qp()
2242 if (t4_sq_onchip(&qhp->wq.sq)) { in c4iw_create_qp()
2254 uresp.sqid = qhp->wq.sq.qid; in c4iw_create_qp()
2255 uresp.sq_size = qhp->wq.sq.size; in c4iw_create_qp()
2256 uresp.sq_memsize = qhp->wq.sq.memsize; in c4iw_create_qp()
2258 uresp.rqid = qhp->wq.rq.qid; in c4iw_create_qp()
2259 uresp.rq_size = qhp->wq.rq.size; in c4iw_create_qp()
2260 uresp.rq_memsize = qhp->wq.rq.memsize; in c4iw_create_qp()
2284 sq_key_mm->addr = qhp->wq.sq.phys_addr; in c4iw_create_qp()
2285 sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize); in c4iw_create_qp()
2289 rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue); in c4iw_create_qp()
2290 rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize); in c4iw_create_qp()
2294 sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa; in c4iw_create_qp()
2300 (u64)(unsigned long)qhp->wq.rq.bar2_pa; in c4iw_create_qp()
2313 qhp->ucontext = ucontext; in c4iw_create_qp()
2316 qhp->wq.qp_errp = in c4iw_create_qp()
2317 &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err; in c4iw_create_qp()
2319 qhp->wq.qp_errp = in c4iw_create_qp()
2320 &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err; in c4iw_create_qp()
2321 qhp->wq.srqidxp = in c4iw_create_qp()
2322 &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx; in c4iw_create_qp()
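
The status-page wiring at lines 2316-2322 differs by RQ ownership: a QP with its own RQ keeps qp_err past the RQ ring, while an SRQ-attached QP keeps both qp_err and srqidx past the SQ ring. Reconstructed, with the branch condition assumed to be the SRQ attribute:

if (!attrs->srq) {
        qhp->wq.qp_errp =
                &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
} else {
        qhp->wq.qp_errp =
                &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
        qhp->wq.srqidxp =
                &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
}
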
2325 qhp->ibqp.qp_num = qhp->wq.sq.qid; in c4iw_create_qp()
2327 qhp->srq = to_c4iw_srq(attrs->srq); in c4iw_create_qp()
2328 INIT_LIST_HEAD(&qhp->db_fc_entry); in c4iw_create_qp()
2330 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize, in c4iw_create_qp()
2331 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size, in c4iw_create_qp()
2332 qhp->wq.rq.memsize, attrs->cap.max_recv_wr); in c4iw_create_qp()
2347 xa_erase_irq(&rhp->qps, qhp->wq.sq.qid); in c4iw_create_qp()
2349 destroy_qp(&rhp->rdev, &qhp->wq, in c4iw_create_qp()
2352 c4iw_put_wr_wait(qhp->wr_waitp); in c4iw_create_qp()
2360 struct c4iw_qp *qhp; in c4iw_ib_modify_qp() local
2377 qhp = to_c4iw_qp(ibqp); in c4iw_ib_modify_qp()
2378 rhp = qhp->rhp; in c4iw_ib_modify_qp()
2407 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); in c4iw_ib_modify_qp()
2458 struct c4iw_qp *qhp = to_c4iw_qp(ibqp); in c4iw_ib_query_qp() local
2462 attr->qp_state = to_ib_qp_state(qhp->attr.state); in c4iw_ib_query_qp()
2463 attr->cur_qp_state = to_ib_qp_state(qhp->attr.state); in c4iw_ib_query_qp()
2464 init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; in c4iw_ib_query_qp()
2465 init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; in c4iw_ib_query_qp()
2466 init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; in c4iw_ib_query_qp()
2467 init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges; in c4iw_ib_query_qp()
2469 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; in c4iw_ib_query_qp()