Lines matching refs: srq

1341 static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe,  in defer_srq_wr()  argument
1344 struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx]; in defer_srq_wr()
1347 __func__, srq->cidx, srq->pidx, srq->wq_pidx, in defer_srq_wr()
1348 srq->in_use, srq->ooo_count, in defer_srq_wr()
1349 (unsigned long long)wr_id, srq->pending_cidx, in defer_srq_wr()
1350 srq->pending_pidx, srq->pending_in_use); in defer_srq_wr()
1354 t4_srq_produce_pending_wr(srq); in defer_srq_wr()
1361 struct c4iw_srq *srq; in c4iw_post_srq_recv() local
1368 srq = to_c4iw_srq(ibsrq); in c4iw_post_srq_recv()
1369 spin_lock_irqsave(&srq->lock, flag); in c4iw_post_srq_recv()
1370 num_wrs = t4_srq_avail(&srq->wq); in c4iw_post_srq_recv()
1372 spin_unlock_irqrestore(&srq->lock, flag); in c4iw_post_srq_recv()
1393 wqe->recv.wrid = srq->wq.pidx; in c4iw_post_srq_recv()
1399 if (srq->wq.ooo_count || in c4iw_post_srq_recv()
1400 srq->wq.pending_in_use || in c4iw_post_srq_recv()
1401 srq->wq.sw_rq[srq->wq.pidx].valid) { in c4iw_post_srq_recv()
1402 defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16); in c4iw_post_srq_recv()
1404 srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id; in c4iw_post_srq_recv()
1405 srq->wq.sw_rq[srq->wq.pidx].valid = 1; in c4iw_post_srq_recv()
1406 c4iw_copy_wr_to_srq(&srq->wq, wqe, len16); in c4iw_post_srq_recv()
1408 __func__, srq->wq.cidx, in c4iw_post_srq_recv()
1409 srq->wq.pidx, srq->wq.wq_pidx, in c4iw_post_srq_recv()
1410 srq->wq.in_use, in c4iw_post_srq_recv()
1412 t4_srq_produce(&srq->wq, len16); in c4iw_post_srq_recv()
1419 t4_ring_srq_db(&srq->wq, idx, len16, wqe); in c4iw_post_srq_recv()
1420 spin_unlock_irqrestore(&srq->lock, flag); in c4iw_post_srq_recv()
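
The c4iw_post_srq_recv() fragments above show the ordering rule the driver appears to follow: under the SRQ lock it checks t4_srq_avail(), and a new receive WR is copied straight into the hardware queue only when nothing is out of order (ooo_count), nothing is already deferred (pending_in_use), and the target slot is free; otherwise defer_srq_wr() parks it on a software pending ring so earlier WRs keep their FIFO order. Below is a minimal userspace sketch of that decision, using invented toy_* types rather than the driver's t4_srq/t4_recv_wr, and omitting locking and the doorbell.

#include <stdint.h>
#include <string.h>

#define SRQ_DEPTH 64           /* illustrative; the real size comes from the device */
#define WR_BYTES  128          /* stand-in for a fixed-size recv WR slot            */

struct sw_rqe { uint64_t wr_id; int valid; };

struct toy_srq {
    struct sw_rqe sw_rq[SRQ_DEPTH];            /* software shadow of posted WRs */
    unsigned char queue[SRQ_DEPTH][WR_BYTES];  /* stands in for the HW queue    */
    unsigned char pending[SRQ_DEPTH][WR_BYTES];/* deferred WRs, FIFO order      */
    int pidx, pending_pidx, pending_in_use, ooo_count, in_use;
};

/* Park a WR on the pending ring; it is replayed once older slots drain. */
static void defer_wr(struct toy_srq *srq, const void *wr)
{
    memcpy(srq->pending[srq->pending_pidx], wr, WR_BYTES);
    srq->pending_pidx = (srq->pending_pidx + 1) % SRQ_DEPTH;
    srq->pending_in_use++;
}

/* Post one receive WR, mirroring the defer-vs-direct choice in the listing:
 * anything out of order or already pending forces the new WR to be deferred,
 * otherwise it is copied into the queue at pidx and produced immediately. */
static void post_recv(struct toy_srq *srq, uint64_t wr_id, const void *wr)
{
    if (srq->ooo_count || srq->pending_in_use || srq->sw_rq[srq->pidx].valid) {
        defer_wr(srq, wr);
        return;
    }
    srq->sw_rq[srq->pidx].wr_id = wr_id;
    srq->sw_rq[srq->pidx].valid = 1;
    memcpy(srq->queue[srq->pidx], wr, WR_BYTES);
    srq->pidx = (srq->pidx + 1) % SRQ_DEPTH;
    srq->in_use++;
    /* the real driver would now ring the SRQ doorbell (t4_ring_srq_db) */
}
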
1622 if (!qhp->srq) { in __flush_qp()
1803 if (qhp->srq) { in rdma_init()
1805 qhp->srq->idx); in rdma_init()
2103 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq); in c4iw_destroy_qp()
2139 if (!attrs->srq) { in c4iw_create_qp()
2162 if (!attrs->srq) { in c4iw_create_qp()
2171 if (!attrs->srq) in c4iw_create_qp()
2178 qhp->wr_waitp, !attrs->srq); in c4iw_create_qp()
2193 if (!attrs->srq) { in c4iw_create_qp()
2221 if (!attrs->srq) { in c4iw_create_qp()
2233 if (!attrs->srq) { in c4iw_create_qp()
2257 if (!attrs->srq) { in c4iw_create_qp()
2269 if (!attrs->srq) { in c4iw_create_qp()
2275 if (!attrs->srq) { in c4iw_create_qp()
2287 if (!attrs->srq) { in c4iw_create_qp()
2297 if (!attrs->srq) { in c4iw_create_qp()
2315 if (!attrs->srq) { in c4iw_create_qp()
2326 if (attrs->srq) in c4iw_create_qp()
2327 qhp->srq = to_c4iw_srq(attrs->srq); in c4iw_create_qp()
2337 if (!attrs->srq) in c4iw_create_qp()
2342 if (!attrs->srq) in c4iw_create_qp()
2350 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq); in c4iw_create_qp()
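
In c4iw_create_qp() the repeated !attrs->srq tests gate everything receive-queue related: a QP attached to an SRQ never allocates its own RQ, and the same flag is passed down to the alloc/free helpers so teardown releases only what was actually set up. A compressed sketch of that pattern follows; the types and the setup_recv_side/teardown_recv_side helpers are invented for illustration and are not driver functions.

#include <stdlib.h>

struct toy_rq { void *buf; };

struct toy_qp {
    struct toy_rq *rq;   /* NULL when the QP posts receives via an SRQ */
    void *srq;           /* non-NULL when attached to a shared RQ      */
};

/* Allocate the per-QP receive queue only when no SRQ is supplied;
 * the SRQ case leaves qp->rq NULL and records the SRQ pointer instead. */
static int setup_recv_side(struct toy_qp *qp, void *srq, size_t rq_bytes)
{
    qp->srq = srq;
    if (srq)
        return 0;                      /* receives will come from the SRQ */
    qp->rq = calloc(1, sizeof(*qp->rq));
    if (!qp->rq)
        return -1;
    qp->rq->buf = calloc(1, rq_bytes);
    if (!qp->rq->buf) {
        free(qp->rq);
        qp->rq = NULL;
        return -1;
    }
    return 0;
}

/* Teardown mirrors setup: free the RQ only if it was actually allocated. */
static void teardown_recv_side(struct toy_qp *qp)
{
    if (!qp->srq && qp->rq) {
        free(qp->rq->buf);
        free(qp->rq);
        qp->rq = NULL;
    }
}
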
2416 void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq) in c4iw_dispatch_srq_limit_reached_event() argument
2420 event.device = &srq->rhp->ibdev; in c4iw_dispatch_srq_limit_reached_event()
2421 event.element.srq = &srq->ibsrq; in c4iw_dispatch_srq_limit_reached_event()
2430 struct c4iw_srq *srq = to_c4iw_srq(ib_srq); in c4iw_modify_srq() local
2437 c4iw_dispatch_srq_limit_reached_event(srq); in c4iw_modify_srq()
2448 srq->armed = true; in c4iw_modify_srq()
2449 srq->srq_limit = attr->srq_limit; in c4iw_modify_srq()
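
c4iw_modify_srq() records attr->srq_limit and sets srq->armed, and c4iw_dispatch_srq_limit_reached_event() later raises an SRQ-limit event through the ibdev once the outstanding receive count drops to that watermark. The toy one-shot arm/notify pair below assumes that overall behaviour; the names and the check placement are made up for illustration.

#include <stdbool.h>
#include <stdio.h>

struct toy_srq {
    int  in_use;       /* receive WRs currently posted          */
    int  srq_limit;    /* low watermark requested by the caller */
    bool armed;        /* one-shot: cleared once the event fires */
};

/* modify(): remember the limit and arm the one-shot notification. */
static void arm_srq_limit(struct toy_srq *srq, int limit)
{
    srq->srq_limit = limit;
    srq->armed = true;
}

/* Called whenever a posted WR is consumed; fires at most once per arming. */
static void srq_consumed_one(struct toy_srq *srq)
{
    srq->in_use--;
    if (srq->armed && srq->in_use <= srq->srq_limit) {
        srq->armed = false;
        /* the driver would build an ib_event with element.srq set
         * and hand it to the consumer's event handler here */
        printf("SRQ limit reached: %d WRs left\n", srq->in_use);
    }
}
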
2473 static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx, in free_srq_queue() argument
2476 struct c4iw_rdev *rdev = &srq->rhp->rdev; in free_srq_queue()
2477 struct sk_buff *skb = srq->destroy_skb; in free_srq_queue()
2478 struct t4_srq *wq = &srq->wq; in free_srq_queue()
2494 res->u.srq.restype = FW_RI_RES_TYPE_SRQ; in free_srq_queue()
2495 res->u.srq.op = FW_RI_RES_OP_RESET; in free_srq_queue()
2496 res->u.srq.srqid = cpu_to_be32(srq->idx); in free_srq_queue()
2497 res->u.srq.eqid = cpu_to_be32(wq->qid); in free_srq_queue()
2510 static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx, in alloc_srq_queue() argument
2513 struct c4iw_rdev *rdev = &srq->rhp->rdev; in alloc_srq_queue()
2515 struct t4_srq *wq = &srq->wq; in alloc_srq_queue()
2532 wq->pending_wrs = kcalloc(srq->wq.size, in alloc_srq_queue()
2533 sizeof(*srq->wq.pending_wrs), in alloc_srq_queue()
2584 res->u.srq.restype = FW_RI_RES_TYPE_SRQ; in alloc_srq_queue()
2585 res->u.srq.op = FW_RI_RES_OP_WRITE; in alloc_srq_queue()
2592 res->u.srq.eqid = cpu_to_be32(wq->qid); in alloc_srq_queue()
2593 res->u.srq.fetchszm_to_iqid = in alloc_srq_queue()
2599 res->u.srq.dcaen_to_eqsize = in alloc_srq_queue()
2607 res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr); in alloc_srq_queue()
2608 res->u.srq.srqid = cpu_to_be32(srq->idx); in alloc_srq_queue()
2609 res->u.srq.pdid = cpu_to_be32(srq->pdid); in alloc_srq_queue()
2610 res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size); in alloc_srq_queue()
2611 res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr - in alloc_srq_queue()
2622 __func__, srq->idx, wq->qid, srq->pdid, wq->queue, in alloc_srq_queue()
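
alloc_srq_queue() and free_srq_queue() drive the hardware through an FW_RI_RES work request: restype FW_RI_RES_TYPE_SRQ with op WRITE to create (supplying srqid, eqid, pdid, the queue DMA address, and the RQT size/offset, all converted to big-endian) or op RESET to tear down. The structure below is invented purely for illustration; the real layout comes from the Chelsio firmware headers, and htobe32/htobe64 stand in for the kernel's cpu_to_be32/cpu_to_be64.

#include <stdint.h>
#include <endian.h>

/* Illustrative layout only, not the driver's firmware work request. */
struct toy_srq_res {
    uint8_t  restype;      /* resource type: SRQ                  */
    uint8_t  op;           /* WRITE to create, RESET to destroy   */
    uint32_t srqid;        /* SRQ index owned by the driver       */
    uint32_t eqid;         /* queue id backing the SRQ            */
    uint32_t pdid;         /* protection domain                   */
    uint64_t eqaddr;       /* DMA address of the queue memory     */
    uint32_t hwsrqsize;    /* RQT entries reserved for this SRQ   */
    uint32_t hwsrqaddr;    /* offset of this SRQ's RQT region     */
};

enum { TOY_RES_TYPE_SRQ = 1, TOY_RES_OP_WRITE = 0, TOY_RES_OP_RESET = 1 };

static void fill_srq_write_cmd(struct toy_srq_res *res, uint32_t srq_idx,
                               uint32_t qid, uint32_t pdid, uint64_t dma_addr,
                               uint32_t rqt_size, uint32_t rqt_off)
{
    res->restype   = TOY_RES_TYPE_SRQ;
    res->op        = TOY_RES_OP_WRITE;     /* free_srq_queue() would use RESET */
    res->srqid     = htobe32(srq_idx);     /* firmware fields are big-endian   */
    res->eqid      = htobe32(qid);
    res->pdid      = htobe32(pdid);
    res->eqaddr    = htobe64(dma_addr);
    res->hwsrqsize = htobe32(rqt_size);
    res->hwsrqaddr = htobe32(rqt_off);
}
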
2645 void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16) in c4iw_copy_wr_to_srq() argument
2650 dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE); in c4iw_copy_wr_to_srq()
2653 if (dst >= (u64 *)&srq->queue[srq->size]) in c4iw_copy_wr_to_srq()
2654 dst = (u64 *)srq->queue; in c4iw_copy_wr_to_srq()
2656 if (dst >= (u64 *)&srq->queue[srq->size]) in c4iw_copy_wr_to_srq()
2657 dst = (u64 *)srq->queue; in c4iw_copy_wr_to_srq()
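
c4iw_copy_wr_to_srq() copies the WR into the ring 64 bits at a time, starting at wq_pidx * T4_EQ_ENTRY_SIZE and wrapping dst back to the start of the queue whenever it runs off the end, so a WR posted near the tail can straddle the wrap point. A standalone version of the same wrapping copy follows; the slot and ring sizes are illustrative.

#include <stdint.h>
#include <stddef.h>

#define EQ_ENTRY_SIZE 64                  /* bytes per ring slot, illustrative */

/* Copy len16 * 16 bytes of WR payload into the ring starting at slot wq_pidx,
 * wrapping to the start of the ring when the write runs past the end. */
static void copy_wr_to_ring(uint64_t *ring, size_t ring_qwords,
                            unsigned int wq_pidx, const uint64_t *src,
                            uint8_t len16)
{
    uint64_t *dst = ring + (size_t)wq_pidx * (EQ_ENTRY_SIZE / sizeof(uint64_t));
    uint64_t *end = ring + ring_qwords;

    for (unsigned int i = 0; i < len16; i++) {
        *dst++ = *src++;                  /* each len16 unit is 16 bytes...   */
        if (dst >= end)
            dst = ring;
        *dst++ = *src++;                  /* ...so copy two 64-bit words      */
        if (dst >= end)
            dst = ring;
    }
}
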
2667 struct c4iw_srq *srq = to_c4iw_srq(ib_srq); in c4iw_create_srq() local
2684 if (!rhp->rdev.lldi.vr->srq.size) in c4iw_create_srq()
2700 srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); in c4iw_create_srq()
2701 if (!srq->wr_waitp) in c4iw_create_srq()
2704 srq->idx = c4iw_alloc_srq_idx(&rhp->rdev); in c4iw_create_srq()
2705 if (srq->idx < 0) { in c4iw_create_srq()
2711 srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL); in c4iw_create_srq()
2712 if (!srq->destroy_skb) { in c4iw_create_srq()
2717 srq->rhp = rhp; in c4iw_create_srq()
2718 srq->pdid = php->pdid; in c4iw_create_srq()
2720 srq->wq.size = rqsize; in c4iw_create_srq()
2721 srq->wq.memsize = in c4iw_create_srq()
2723 sizeof(*srq->wq.queue); in c4iw_create_srq()
2725 srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE); in c4iw_create_srq()
2727 ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx : in c4iw_create_srq()
2728 &rhp->rdev.uctx, srq->wr_waitp); in c4iw_create_srq()
2734 srq->flags = T4_SRQ_LIMIT_SUPPORT; in c4iw_create_srq()
2748 uresp.flags = srq->flags; in c4iw_create_srq()
2750 uresp.srqid = srq->wq.qid; in c4iw_create_srq()
2751 uresp.srq_size = srq->wq.size; in c4iw_create_srq()
2752 uresp.srq_memsize = srq->wq.memsize; in c4iw_create_srq()
2753 uresp.rqt_abs_idx = srq->wq.rqt_abs_idx; in c4iw_create_srq()
2764 srq_key_mm->addr = virt_to_phys(srq->wq.queue); in c4iw_create_srq()
2765 srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize); in c4iw_create_srq()
2768 srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa; in c4iw_create_srq()
2774 __func__, srq->wq.qid, srq->idx, srq->wq.size, in c4iw_create_srq()
2775 (unsigned long)srq->wq.memsize, attrs->attr.max_wr); in c4iw_create_srq()
2777 spin_lock_init(&srq->lock); in c4iw_create_srq()
2785 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_create_srq()
2786 srq->wr_waitp); in c4iw_create_srq()
2788 kfree_skb(srq->destroy_skb); in c4iw_create_srq()
2790 c4iw_free_srq_idx(&rhp->rdev, srq->idx); in c4iw_create_srq()
2792 c4iw_put_wr_wait(srq->wr_waitp); in c4iw_create_srq()
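
c4iw_create_srq() sizes the queue memory from the entry count, rounds it up to a page, and on failure unwinds in strict reverse order through a goto ladder (free the queue, the pre-allocated destroy skb, the SRQ index, then the wait object). The trimmed sketch below shows that shape with placeholder allocators; none of these helpers are the driver's, and the index step is faked so only two error labels are needed.

#include <stdlib.h>

#define TOY_PAGE_SIZE 4096UL
#define ROUND_UP(x, a) (((x) + (a) - 1) / (a) * (a))

struct toy_srq { void *wait, *skb, *queue; int idx; size_t memsize; };

static int toy_create_srq(struct toy_srq *srq, size_t rqsize, size_t entry_bytes)
{
    /* size the queue from the entry count, then round to a whole page */
    srq->memsize = ROUND_UP(rqsize * entry_bytes, TOY_PAGE_SIZE);

    srq->wait = malloc(32);                 /* stands in for the wr_wait object */
    if (!srq->wait)
        return -1;
    srq->idx = 7;                           /* stands in for index allocation   */
    srq->skb = malloc(64);                  /* pre-allocated destroy message    */
    if (!srq->skb)
        goto err_free_wait;
    srq->queue = aligned_alloc(TOY_PAGE_SIZE, srq->memsize);
    if (!srq->queue)
        goto err_free_skb;
    return 0;

err_free_skb:                               /* unwind strictly in reverse order */
    free(srq->skb);
err_free_wait:
    free(srq->wait);
    return -1;
}
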
2799 struct c4iw_srq *srq; in c4iw_destroy_srq() local
2802 srq = to_c4iw_srq(ibsrq); in c4iw_destroy_srq()
2803 rhp = srq->rhp; in c4iw_destroy_srq()
2805 pr_debug("%s id %d\n", __func__, srq->wq.qid); in c4iw_destroy_srq()
2808 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_destroy_srq()
2809 srq->wr_waitp); in c4iw_destroy_srq()
2810 c4iw_free_srq_idx(&rhp->rdev, srq->idx); in c4iw_destroy_srq()
2811 c4iw_put_wr_wait(srq->wr_waitp); in c4iw_destroy_srq()