Lines Matching refs:wq

150 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, in destroy_qp() argument
157 dealloc_sq(rdev, &wq->sq); in destroy_qp()
158 kfree(wq->sq.sw_sq); in destroy_qp()
159 c4iw_put_qpid(rdev, wq->sq.qid, uctx); in destroy_qp()
163 wq->rq.memsize, wq->rq.queue, in destroy_qp()
164 dma_unmap_addr(&wq->rq, mapping)); in destroy_qp()
165 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); in destroy_qp()
166 kfree(wq->rq.sw_rq); in destroy_qp()
167 c4iw_put_qpid(rdev, wq->rq.qid, uctx); in destroy_qp()
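Taken together, the destroy_qp() matches above describe a teardown that runs in reverse of allocation: SQ memory, the software SQ shadow array and the SQ qid go first, then the RQ ring, its RQT region, the software RQ shadow and the RQ qid. The fragment below is a hedged reconstruction assembled from those matched lines only; the elided context (for instance any guard that skips the RQ half when the QP uses an SRQ) is not shown in the listing, and the dma_free_coherent() call itself is assumed from its argument lines 163-164.

    /* Reconstruction of the destroy_qp() teardown order (context elided). */
    dealloc_sq(rdev, &wq->sq);                      /* line 157 */
    kfree(wq->sq.sw_sq);                            /* line 158 */
    c4iw_put_qpid(rdev, wq->sq.qid, uctx);          /* line 159 */

    dma_free_coherent(&rdev->lldi.pdev->dev,        /* call assumed; arguments  */
                      wq->rq.memsize, wq->rq.queue, /* from lines 163-164       */
                      dma_unmap_addr(&wq->rq, mapping));
    c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); /* line 165 */
    kfree(wq->rq.sw_rq);                            /* line 166 */
    c4iw_put_qpid(rdev, wq->rq.qid, uctx);          /* line 167 */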
199 static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, in create_qp() argument
213 wq->sq.qid = c4iw_get_qpid(rdev, uctx); in create_qp()
214 if (!wq->sq.qid) in create_qp()
218 wq->rq.qid = c4iw_get_qpid(rdev, uctx); in create_qp()
219 if (!wq->rq.qid) { in create_qp()
226 wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq), in create_qp()
228 if (!wq->sq.sw_sq) { in create_qp()
234 wq->rq.sw_rq = kcalloc(wq->rq.size, in create_qp()
235 sizeof(*wq->rq.sw_rq), in create_qp()
237 if (!wq->rq.sw_rq) { in create_qp()
248 wq->rq.rqt_size = in create_qp()
249 roundup_pow_of_two(max_t(u16, wq->rq.size, 16)); in create_qp()
250 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size); in create_qp()
251 if (!wq->rq.rqt_hwaddr) { in create_qp()
257 ret = alloc_sq(rdev, &wq->sq, user); in create_qp()
260 memset(wq->sq.queue, 0, wq->sq.memsize); in create_qp()
261 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); in create_qp()
264 wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, in create_qp()
265 wq->rq.memsize, in create_qp()
266 &wq->rq.dma_addr, in create_qp()
268 if (!wq->rq.queue) { in create_qp()
273 wq->sq.queue, in create_qp()
274 (unsigned long long)virt_to_phys(wq->sq.queue), in create_qp()
275 wq->rq.queue, in create_qp()
276 (unsigned long long)virt_to_phys(wq->rq.queue)); in create_qp()
277 dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr); in create_qp()
280 wq->db = rdev->lldi.db_reg; in create_qp()
282 wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, in create_qp()
284 &wq->sq.bar2_qid, in create_qp()
285 user ? &wq->sq.bar2_pa : NULL); in create_qp()
287 wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, in create_qp()
289 &wq->rq.bar2_qid, in create_qp()
290 user ? &wq->rq.bar2_pa : NULL); in create_qp()
295 if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) { in create_qp()
297 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); in create_qp()
302 wq->rdev = rdev; in create_qp()
303 wq->rq.msn = 1; in create_qp()
330 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + in create_qp()
337 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) | in create_qp()
343 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) : in create_qp()
348 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid); in create_qp()
349 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr); in create_qp()
359 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + in create_qp()
377 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid); in create_qp()
378 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr); in create_qp()
382 ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__); in create_qp()
387 wq->sq.qid, wq->rq.qid, wq->db, in create_qp()
388 wq->sq.bar2_va, wq->rq.bar2_va); in create_qp()
394 wq->rq.memsize, wq->rq.queue, in create_qp()
395 dma_unmap_addr(&wq->rq, mapping)); in create_qp()
397 dealloc_sq(rdev, &wq->sq); in create_qp()
400 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); in create_qp()
403 kfree(wq->rq.sw_rq); in create_qp()
405 kfree(wq->sq.sw_sq); in create_qp()
408 c4iw_put_qpid(rdev, wq->rq.qid, uctx); in create_qp()
410 c4iw_put_qpid(rdev, wq->sq.qid, uctx); in create_qp()
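create_qp() acquires resources forward and releases them backward on failure: SQ and RQ qids (lines 213, 218), the software shadow arrays (226, 234), an RQT region sized to the RQ depth rounded up to a power of two with a floor of 16 (248-250), the SQ itself (257), and finally the coherent RQ ring (264-268); the error path at lines 394-410 then frees the same objects in reverse. A minimal, self-contained sketch of that goto-ladder unwind idiom, using plain userspace stand-ins rather than the driver's allocators:

    #include <stdlib.h>

    struct queues { void *sq; void *rq; int qid; };

    static int create_queues(struct queues *q)
    {
            q->qid = 1;                     /* stand-in for c4iw_get_qpid()      */
            q->sq = calloc(64, 64);         /* stand-in for alloc_sq()           */
            if (!q->sq)
                    goto put_qid;
            q->rq = calloc(64, 64);         /* stand-in for dma_alloc_coherent() */
            if (!q->rq)
                    goto free_sq;
            return 0;
    free_sq:                                /* unwind mirrors allocation order   */
            free(q->sq);
    put_qid:
            q->qid = 0;                     /* stand-in for c4iw_put_qpid()      */
            return -1;
    }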
706 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + in post_write_cmpl()
707 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); in post_write_cmpl()
708 build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16); in post_write_cmpl()
711 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; in post_write_cmpl()
713 swsqe->idx = qhp->wq.sq.pidx; in post_write_cmpl()
724 write_wrid = qhp->wq.sq.pidx; in post_write_cmpl()
727 qhp->wq.sq.in_use++; in post_write_cmpl()
728 if (++qhp->wq.sq.pidx == qhp->wq.sq.size) in post_write_cmpl()
729 qhp->wq.sq.pidx = 0; in post_write_cmpl()
732 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; in post_write_cmpl()
737 swsqe->idx = qhp->wq.sq.pidx; in post_write_cmpl()
749 wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx; in post_write_cmpl()
753 t4_sq_produce(&qhp->wq, len16); in post_write_cmpl()
756 t4_ring_sq_db(&qhp->wq, idx, wqe); in post_write_cmpl()
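post_write_cmpl() builds two work requests back to back (a WRITE followed by the SEND that carries the completion), recording each in the software SQ at the current producer index (lines 711 and 732) and advancing that index by hand in between (727-729). The wrap is a plain fixed-size ring; a tiny standalone version of the idiom:

    /* Ring-producer wrap as used at lines 727-729. */
    struct ring { unsigned int pidx, in_use, size; };

    static void ring_produce(struct ring *r)
    {
            r->in_use++;
            if (++r->pidx == r->size)       /* wrap without a modulo */
                    r->pidx = 0;
    }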
764 ret = build_isgl((__be64 *)qhp->wq.rq.queue, in build_rdma_recv()
765 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size], in build_rdma_recv()
919 t4_ring_sq_db(&qhp->wq, inc, NULL); in ring_kernel_sq_db()
922 qhp->wq.sq.wq_pidx_inc += inc; in ring_kernel_sq_db()
936 t4_ring_rq_db(&qhp->wq, inc, NULL); in ring_kernel_rq_db()
939 qhp->wq.rq.wq_pidx_inc += inc; in ring_kernel_rq_db()
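ring_kernel_sq_db() and ring_kernel_rq_db() either ring the hardware doorbell immediately via t4_ring_sq_db()/t4_ring_rq_db() (lines 919, 936) or, when doorbells are stalled, accumulate the increment in wq_pidx_inc for later (922, 939). A hedged, driver-independent sketch of that ring-or-defer decision (db_stalled and the printf are stand-ins for the real state flag and doorbell write):

    #include <stdio.h>

    static unsigned int pending_inc;        /* plays the role of wq_pidx_inc */

    static void ring_or_defer(int db_stalled, unsigned int inc)
    {
            if (db_stalled) {
                    pending_inc += inc;     /* applied once doorbells drain  */
                    return;
            }
            printf("ring doorbell, inc=%u\n", inc);
    }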
1001 CQE_QPID_V(qhp->wq.sq.qid)); in complete_sq_drain_wr()
1052 CQE_QPID_V(qhp->wq.sq.qid)); in complete_rq_drain_wr()
1100 if (qhp->wq.flushed) { in c4iw_post_send()
1105 num_wrs = t4_sq_avail(&qhp->wq); in c4iw_post_send()
1142 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + in c4iw_post_send()
1143 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); in c4iw_post_send()
1150 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; in c4iw_post_send()
1161 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
1173 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
1189 if (!qhp->wq.sq.oldest_read) in c4iw_post_send()
1190 qhp->wq.sq.oldest_read = swsqe; in c4iw_post_send()
1203 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), in c4iw_post_send()
1229 swsqe->idx = qhp->wq.sq.pidx; in c4iw_post_send()
1241 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); in c4iw_post_send()
1244 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, in c4iw_post_send()
1248 t4_sq_produce(&qhp->wq, len16); in c4iw_post_send()
1252 t4_ring_sq_db(&qhp->wq, idx, wqe); in c4iw_post_send()
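The send path first rejects posts to a flushed QP (line 1100), bounds the loop by t4_sq_avail() (1105), points wqe at the slot wq_pidx * T4_EQ_ENTRY_SIZE into the SQ (1142-1143), fills it with a build_rdma_* helper, records the request in sw_sq[pidx] (1150), and only rings the doorbell after the loop (1248, 1252). A rough, self-contained sketch of that avail-bounded posting loop; the slot size and helpers are simplified stand-ins, not the driver's:

    #define SLOT_SIZE 64

    struct sq { unsigned char *queue; unsigned int pidx, size, avail; };

    static int post_all(struct sq *sq, int nr_wr)
    {
            while (nr_wr--) {
                    unsigned char *wqe;

                    if (!sq->avail)
                            return -1;      /* -ENOMEM in the driver           */
                    wqe = sq->queue + sq->pidx * SLOT_SIZE;
                    wqe[0] = 0;             /* a build_rdma_* helper fills it  */
                    sq->avail--;
                    if (++sq->pidx == sq->size)
                            sq->pidx = 0;
            }
            return 0;                       /* driver rings the doorbell here  */
    }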
1279 if (qhp->wq.flushed) { in c4iw_post_receive()
1284 num_wrs = t4_rq_avail(&qhp->wq); in c4iw_post_receive()
1296 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue + in c4iw_post_receive()
1297 qhp->wq.rq.wq_pidx * in c4iw_post_receive()
1308 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; in c4iw_post_receive()
1310 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts = in c4iw_post_receive()
1313 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time = in c4iw_post_receive()
1319 wqe->recv.wrid = qhp->wq.rq.pidx; in c4iw_post_receive()
1325 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx); in c4iw_post_receive()
1326 t4_rq_produce(&qhp->wq, len16); in c4iw_post_receive()
1332 t4_ring_rq_db(&qhp->wq, idx, wqe); in c4iw_post_receive()
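c4iw_post_receive() mirrors the send side: it computes the next RQ slot from wq_pidx (lines 1296-1297), stashes the caller's wr_id and optional timestamps in the sw_rq shadow entry at pidx (1308-1313), and writes that same pidx into the WQE's wrid field (1319) before producing and ringing the doorbell (1326, 1332). A small sketch of that shadow-entry bookkeeping, with simplified types:

    struct rq_slot { unsigned long wr_id; unsigned long ts; };
    struct rq { struct rq_slot *sw_rq; unsigned int pidx, size; };

    static unsigned int record_recv(struct rq *rq, unsigned long wr_id, unsigned long now)
    {
            unsigned int wrid = rq->pidx;   /* wrid carries the slot index (line 1319) */

            rq->sw_rq[wrid].wr_id = wr_id;  /* lines 1308-1313 */
            rq->sw_rq[wrid].ts = now;
            if (++rq->pidx == rq->size)
                    rq->pidx = 0;
            return wrid;
    }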
1370 num_wrs = t4_srq_avail(&srq->wq); in c4iw_post_srq_recv()
1393 wqe->recv.wrid = srq->wq.pidx; in c4iw_post_srq_recv()
1399 if (srq->wq.ooo_count || in c4iw_post_srq_recv()
1400 srq->wq.pending_in_use || in c4iw_post_srq_recv()
1401 srq->wq.sw_rq[srq->wq.pidx].valid) { in c4iw_post_srq_recv()
1402 defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16); in c4iw_post_srq_recv()
1404 srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id; in c4iw_post_srq_recv()
1405 srq->wq.sw_rq[srq->wq.pidx].valid = 1; in c4iw_post_srq_recv()
1406 c4iw_copy_wr_to_srq(&srq->wq, wqe, len16); in c4iw_post_srq_recv()
1408 __func__, srq->wq.cidx, in c4iw_post_srq_recv()
1409 srq->wq.pidx, srq->wq.wq_pidx, in c4iw_post_srq_recv()
1410 srq->wq.in_use, in c4iw_post_srq_recv()
1412 t4_srq_produce(&srq->wq, len16); in c4iw_post_srq_recv()
1419 t4_ring_srq_db(&srq->wq, idx, len16, wqe); in c4iw_post_srq_recv()
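For SRQs the post path cannot always hand the WR straight to hardware: if out-of-order completions are outstanding, earlier deferred WRs are still pending, or the target software slot is still marked valid (lines 1399-1401), the WR is parked with defer_srq_wr(); otherwise it is recorded in sw_rq[] and copied to the queue with c4iw_copy_wr_to_srq() (1404-1406). A hedged sketch of just that decision, with simplified types:

    struct srq_slot { unsigned long wr_id; int valid; };
    struct srq {
            struct srq_slot *sw_rq;
            unsigned int pidx, ooo_count, pending_in_use;
    };

    static int post_or_defer(struct srq *s, unsigned long wr_id)
    {
            if (s->ooo_count || s->pending_in_use || s->sw_rq[s->pidx].valid)
                    return 1;                       /* defer_srq_wr() path  */
            s->sw_rq[s->pidx].wr_id = wr_id;        /* lines 1404-1405      */
            s->sw_rq[s->pidx].valid = 1;
            return 0;                               /* direct-copy path     */
    }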
1567 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, in post_terminate()
1611 if (qhp->wq.flushed) { in __flush_qp()
1618 qhp->wq.flushed = 1; in __flush_qp()
1619 t4_set_wq_in_error(&qhp->wq, 0); in __flush_qp()
1623 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); in __flush_qp()
1624 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); in __flush_qp()
1671 if (qhp->wq.flushed) in flush_qp()
1674 qhp->wq.flushed = 1; in flush_qp()
1675 t4_set_wq_in_error(&qhp->wq, 0); in flush_qp()
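Both __flush_qp() (lines 1611-1619) and flush_qp() (1671-1675) test wq.flushed before setting it and marking the work queue in error, so a QP is flushed at most once no matter how many paths reach the error handling. The guard reduces to:

    struct wq_state { int flushed; };

    static void flush_once(struct wq_state *wq)
    {
            if (wq->flushed)        /* already flushed: nothing to do           */
                    return;
            wq->flushed = 1;        /* driver then sets in-error and drains CQs */
    }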
1699 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid); in rdma_fini()
1719 qhp->ep->hwtid, qhp->wq.sq.qid, __func__); in rdma_fini()
1756 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord); in rdma_init()
1799 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq)); in rdma_init()
1801 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid); in rdma_init()
1802 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid); in rdma_init()
1807 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid); in rdma_init()
1808 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size); in rdma_init()
1809 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr - in rdma_init()
1822 qhp->ep->hwtid, qhp->wq.sq.qid, __func__); in rdma_init()
1846 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, in c4iw_modify_qp()
1934 t4_set_wq_in_error(&qhp->wq, 0); in c4iw_modify_qp()
1947 t4_set_wq_in_error(&qhp->wq, 0); in c4iw_modify_qp()
1964 t4_set_wq_in_error(&qhp->wq, 0); in c4iw_modify_qp()
2009 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) { in c4iw_modify_qp()
2031 qhp->wq.sq.qid); in c4iw_modify_qp()
2089 __xa_erase(&rhp->qps, qhp->wq.sq.qid); in c4iw_destroy_qp()
2099 pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid); in c4iw_destroy_qp()
2102 destroy_qp(&rhp->rdev, &qhp->wq, in c4iw_destroy_qp()
2157 qhp->wq.sq.size = sqsize; in c4iw_create_qp()
2158 qhp->wq.sq.memsize = in c4iw_create_qp()
2160 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64); in c4iw_create_qp()
2161 qhp->wq.sq.flush_cidx = -1; in c4iw_create_qp()
2163 qhp->wq.rq.size = rqsize; in c4iw_create_qp()
2164 qhp->wq.rq.memsize = in c4iw_create_qp()
2166 sizeof(*qhp->wq.rq.queue); in c4iw_create_qp()
2170 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); in c4iw_create_qp()
2172 qhp->wq.rq.memsize = in c4iw_create_qp()
2173 roundup(qhp->wq.rq.memsize, PAGE_SIZE); in c4iw_create_qp()
2176 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, in c4iw_create_qp()
2211 ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL); in c4iw_create_qp()
2242 if (t4_sq_onchip(&qhp->wq.sq)) { in c4iw_create_qp()
2254 uresp.sqid = qhp->wq.sq.qid; in c4iw_create_qp()
2255 uresp.sq_size = qhp->wq.sq.size; in c4iw_create_qp()
2256 uresp.sq_memsize = qhp->wq.sq.memsize; in c4iw_create_qp()
2258 uresp.rqid = qhp->wq.rq.qid; in c4iw_create_qp()
2259 uresp.rq_size = qhp->wq.rq.size; in c4iw_create_qp()
2260 uresp.rq_memsize = qhp->wq.rq.memsize; in c4iw_create_qp()
2284 sq_key_mm->addr = qhp->wq.sq.phys_addr; in c4iw_create_qp()
2285 sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize); in c4iw_create_qp()
2289 rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue); in c4iw_create_qp()
2290 rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize); in c4iw_create_qp()
2294 sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa; in c4iw_create_qp()
2300 (u64)(unsigned long)qhp->wq.rq.bar2_pa; in c4iw_create_qp()
2316 qhp->wq.qp_errp = in c4iw_create_qp()
2317 &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err; in c4iw_create_qp()
2319 qhp->wq.qp_errp = in c4iw_create_qp()
2320 &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err; in c4iw_create_qp()
2321 qhp->wq.srqidxp = in c4iw_create_qp()
2322 &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx; in c4iw_create_qp()
2325 qhp->ibqp.qp_num = qhp->wq.sq.qid; in c4iw_create_qp()
2330 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize, in c4iw_create_qp()
2331 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size, in c4iw_create_qp()
2332 qhp->wq.rq.memsize, attrs->cap.max_recv_wr); in c4iw_create_qp()
2347 xa_erase_irq(&rhp->qps, qhp->wq.sq.qid); in c4iw_create_qp()
2349 destroy_qp(&rhp->rdev, &qhp->wq, in c4iw_create_qp()
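c4iw_create_qp() sizes each queue from its entry count and entry size (lines 2157-2166; the full formula sits on continuation lines not shown here) and, for user QPs, rounds both memsizes up to PAGE_SIZE (2170-2173) because the queues are later exposed to userspace through mmap cookies built with PAGE_ALIGN (2284-2290). A hedged sketch of that sizing, with an illustrative entry size rather than the driver's:

    #define EXAMPLE_PAGE_SIZE 4096UL
    #define EXAMPLE_ENTRY_SIZE  64UL

    static unsigned long queue_memsize(unsigned long nentries, int user)
    {
            unsigned long memsize = nentries * EXAMPLE_ENTRY_SIZE;

            if (user)       /* mmap()ed by userspace, so page-align it */
                    memsize = (memsize + EXAMPLE_PAGE_SIZE - 1) &
                              ~(EXAMPLE_PAGE_SIZE - 1);
            return memsize;
    }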
2478 struct t4_srq *wq = &srq->wq; in free_srq_queue() local
2497 res->u.srq.eqid = cpu_to_be32(wq->qid); in free_srq_queue()
2503 wq->memsize, wq->queue, in free_srq_queue()
2504 dma_unmap_addr(wq, mapping)); in free_srq_queue()
2505 c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size); in free_srq_queue()
2506 kfree(wq->sw_rq); in free_srq_queue()
2507 c4iw_put_qpid(rdev, wq->qid, uctx); in free_srq_queue()
2515 struct t4_srq *wq = &srq->wq; in alloc_srq_queue() local
2523 wq->qid = c4iw_get_qpid(rdev, uctx); in alloc_srq_queue()
2524 if (!wq->qid) in alloc_srq_queue()
2528 wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq), in alloc_srq_queue()
2530 if (!wq->sw_rq) in alloc_srq_queue()
2532 wq->pending_wrs = kcalloc(srq->wq.size, in alloc_srq_queue()
2533 sizeof(*srq->wq.pending_wrs), in alloc_srq_queue()
2535 if (!wq->pending_wrs) in alloc_srq_queue()
2539 wq->rqt_size = wq->size; in alloc_srq_queue()
2540 wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size); in alloc_srq_queue()
2541 if (!wq->rqt_hwaddr) in alloc_srq_queue()
2543 wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >> in alloc_srq_queue()
2546 wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize, in alloc_srq_queue()
2547 &wq->dma_addr, GFP_KERNEL); in alloc_srq_queue()
2548 if (!wq->queue) in alloc_srq_queue()
2551 dma_unmap_addr_set(wq, mapping, wq->dma_addr); in alloc_srq_queue()
2553 wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS, in alloc_srq_queue()
2554 &wq->bar2_qid, in alloc_srq_queue()
2555 user ? &wq->bar2_pa : NULL); in alloc_srq_queue()
2561 if (user && !wq->bar2_va) { in alloc_srq_queue()
2563 pci_name(rdev->lldi.pdev), wq->qid); in alloc_srq_queue()
2590 eqsize = wq->size * T4_RQ_NUM_SLOTS + in alloc_srq_queue()
2592 res->u.srq.eqid = cpu_to_be32(wq->qid); in alloc_srq_queue()
2607 res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr); in alloc_srq_queue()
2610 res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size); in alloc_srq_queue()
2611 res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr - in alloc_srq_queue()
2616 ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__); in alloc_srq_queue()
2622 __func__, srq->idx, wq->qid, srq->pdid, wq->queue, in alloc_srq_queue()
2623 (u64)virt_to_phys(wq->queue), wq->bar2_va, in alloc_srq_queue()
2624 wq->rqt_hwaddr, wq->rqt_size); in alloc_srq_queue()
2629 wq->memsize, wq->queue, in alloc_srq_queue()
2630 dma_unmap_addr(wq, mapping)); in alloc_srq_queue()
2632 c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size); in alloc_srq_queue()
2635 kfree(wq->pending_wrs); in alloc_srq_queue()
2638 kfree(wq->sw_rq); in alloc_srq_queue()
2640 c4iw_put_qpid(rdev, wq->qid, uctx); in alloc_srq_queue()
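alloc_srq_queue() sizes the RQT region to the SRQ depth (line 2539), allocates it from the RQT pool (2540), and converts the returned hardware address into an absolute index by subtracting the pool base and shifting (2543; the shift constant is on a continuation line that is not part of the listing). The conversion itself is only pointer arithmetic:

    /* Offset-to-index conversion as at line 2543; entry_shift is a parameter
     * here because the actual shift constant is not visible in the listing. */
    static unsigned int rqt_abs_idx(unsigned long rqt_hwaddr,
                                    unsigned long pool_start,
                                    unsigned int entry_shift)
    {
            return (unsigned int)((rqt_hwaddr - pool_start) >> entry_shift);
    }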
2720 srq->wq.size = rqsize; in c4iw_create_srq()
2721 srq->wq.memsize = in c4iw_create_srq()
2723 sizeof(*srq->wq.queue); in c4iw_create_srq()
2725 srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE); in c4iw_create_srq()
2750 uresp.srqid = srq->wq.qid; in c4iw_create_srq()
2751 uresp.srq_size = srq->wq.size; in c4iw_create_srq()
2752 uresp.srq_memsize = srq->wq.memsize; in c4iw_create_srq()
2753 uresp.rqt_abs_idx = srq->wq.rqt_abs_idx; in c4iw_create_srq()
2764 srq_key_mm->addr = virt_to_phys(srq->wq.queue); in c4iw_create_srq()
2765 srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize); in c4iw_create_srq()
2768 srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa; in c4iw_create_srq()
2774 __func__, srq->wq.qid, srq->idx, srq->wq.size, in c4iw_create_srq()
2775 (unsigned long)srq->wq.memsize, attrs->attr.max_wr); in c4iw_create_srq()
2805 pr_debug("%s id %d\n", __func__, srq->wq.qid); in c4iw_destroy_srq()