/linux-6.1.9/drivers/infiniband/hw/hfi1/ |
D | vnic_sdma.c |
      29  struct sdma_txreq txreq;  member
      38  static void vnic_sdma_complete(struct sdma_txreq *txreq,  in vnic_sdma_complete() argument
      41  struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);  in vnic_sdma_complete()
      44  sdma_txclean(vnic_sdma->dd, txreq);  in vnic_sdma_complete()
      56  &tx->txreq,  in build_vnic_ulp_payload()
      67  &tx->txreq,  in build_vnic_ulp_payload()
      76  ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,  in build_vnic_ulp_payload()
      92  &tx->txreq,  in build_vnic_tx_desc()
     107  &tx->txreq,  in build_vnic_tx_desc()
     155  &tx->txreq, vnic_sdma->pkts_sent);  in hfi1_vnic_send_dma()
     [all …]
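These vnic_sdma.c hits show the idiom that recurs throughout this listing: a driver-private request (struct vnic_txreq) embeds the generic struct sdma_txreq as a member, only a pointer to that member is handed to the SDMA core, and the completion callback recovers the wrapper with container_of(). A minimal, self-contained sketch of the recovery step follows; the simplified struct bodies and the private_state field are illustrative assumptions, not the driver's real layout.

#include <stddef.h>
#include <stdio.h>

/* Simplified form of the kernel macro: recover the enclosing structure
 * from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct sdma_txreq {
        int num_desc;                   /* stand-in fields for the sketch */
        unsigned int flags;
};

struct vnic_txreq {
        struct sdma_txreq txreq;        /* member handed to the SDMA core */
        int private_state;              /* hypothetical driver-private data */
};

/* The core calls back with only the embedded member (cf. line 38)... */
static void vnic_sdma_complete(struct sdma_txreq *txreq, int status)
{
        /* ...and container_of() rebuilds the wrapper pointer (cf. line 41). */
        struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);

        printf("tx %p done, private_state=%d, status=%d\n",
               (void *)tx, tx->private_state, status);
}

int main(void)
{
        struct vnic_txreq tx = { .private_state = 42 };

        vnic_sdma_complete(&tx.txreq, 0);
        return 0;
}

Since the recovery is pure pointer arithmetic over offsetof(), the SDMA core never needs to know the wrapper type; each driver below (ipoib, verbs, user_sdma, qib) wraps the same request type in its own way.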
|
D | ipoib_tx.c |
     131  sdma_txclean(priv->dd, &tx->txreq);  in hfi1_ipoib_free_tx()
     145  sdma_txclean(txq->priv->dd, &tx->txreq);  in hfi1_ipoib_drain_tx_ring()
     188  static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status)  in hfi1_ipoib_sdma_complete() argument
     190  struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq);  in hfi1_ipoib_sdma_complete()
     203  struct sdma_txreq *txreq = &tx->txreq;  in hfi1_ipoib_build_ulp_payload() local
     209  ret = sdma_txadd_kvaddr(dd, txreq, skb->data, skb_headlen(skb));  in hfi1_ipoib_build_ulp_payload()
     218  txreq,  in hfi1_ipoib_build_ulp_payload()
     233  struct sdma_txreq *txreq = &tx->txreq;  in hfi1_ipoib_build_tx_desc() local
     239  ret = sdma_txinit(txreq, 0, pkt_bytes, hfi1_ipoib_sdma_complete);  in hfi1_ipoib_build_tx_desc()
     245  txreq,  in hfi1_ipoib_build_tx_desc()
     [all …]
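The ipoib_tx.c hits outline the descriptor-build flow: sdma_txinit() registers hfi1_ipoib_sdma_complete() as the completion callback, then sdma_txadd_kvaddr() maps the skb's linear data; on any failure, sdma_txclean() releases what was already mapped. A kernel-context sketch composed only from the calls visible above (the per-fragment loop for skb frags and all surrounding types are elided or assumed):

/* Sketch, not the driver's actual function: composes the calls seen at
 * lines 131, 209, and 239 into one plausible build path. */
static int ipoib_build_tx_sketch(struct hfi1_devdata *dd,
                                 struct ipoib_txreq *tx,
                                 struct sk_buff *skb, u32 pkt_bytes)
{
        struct sdma_txreq *txreq = &tx->txreq;
        int ret;

        /* Initialize the request; the callback fires on completion. */
        ret = sdma_txinit(txreq, 0, pkt_bytes, hfi1_ipoib_sdma_complete);
        if (ret)
                return ret;

        /* Map the skb's linear area into the descriptor list. */
        ret = sdma_txadd_kvaddr(dd, txreq, skb->data, skb_headlen(skb));
        if (ret)
                sdma_txclean(dd, txreq);        /* unwind partial mappings */

        return ret;
}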
|
D | verbs_txreq.h |
      18  struct sdma_txreq txreq;  member
      53  tx->txreq.num_desc = 0;  in get_txreq()
      56  tx->txreq.flags = 0;  in get_txreq()
      66  return container_of(stx, struct verbs_txreq, txreq);  in get_waiting_verbs_txreq()
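verbs_txreq.h shows the other half of the embedding pattern: when a recycled request is handed out, get_txreq() zeroes the embedded request's descriptor count and flags so no state leaks between uses, and get_waiting_verbs_txreq() is the container_of() accessor for requests parked on a wait list. A sketch of just those two steps (the pool allocation around them is elided):

/* Reset the embedded request before reuse (cf. lines 53 and 56). */
static void reset_txreq_sketch(struct verbs_txreq *tx)
{
        tx->txreq.num_desc = 0;         /* descriptor list starts empty */
        tx->txreq.flags = 0;            /* nothing carried over from last use */
}

/* Recover the wrapper from a queued sdma_txreq (cf. line 66). */
static struct verbs_txreq *get_waiting_verbs_txreq(struct sdma_txreq *stx)
{
        return container_of(stx, struct verbs_txreq, txreq);
}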
|
D | user_sdma.c |
      40  static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
      64  struct sdma_txreq *txreq,
      87  struct sdma_txreq *txreq,  in defer_packet_queue() argument
      96  if (sdma_progress(sde, seq, txreq))  in defer_packet_queue()
     688  ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY,  in user_sdma_txadd_ahg()
     693  ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));  in user_sdma_txadd_ahg()
     695  sdma_txclean(pq->dd, &tx->txreq);  in user_sdma_txadd_ahg()
     719  ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],  in user_sdma_txadd()
     851  ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +  in user_sdma_send_pkts()
     887  list_add_tail(&tx->txreq.list, &req->txps);  in user_sdma_send_pkts()
     [all …]
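Two things stand out in the user_sdma.c hits: defer_packet_queue() polls sdma_progress() to decide whether a blocked queue can resume, and user_sdma_txadd_ahg() demonstrates the mandatory unwind rule, i.e. once sdma_txinit_ahg() has succeeded, a failed sdma_txadd_*() must be followed by sdma_txclean() so already-mapped fragments are released. A sketch of that unwind step, using only the calls visible at lines 693-695 (the containing struct names match the user_sdma.h hit below but are otherwise assumed):

/* Sketch of the unwind idiom at lines 693-695. */
static int add_header_sketch(struct hfi1_user_sdma_pkt_q *pq,
                             struct user_sdma_txreq *tx)
{
        int ret;

        /* Map the request header into the descriptor list. */
        ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
        if (ret)
                sdma_txclean(pq->dd, &tx->txreq); /* drop partial mappings */

        return ret;
}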
|
D | verbs.c |
     590  container_of(cookie, struct verbs_txreq, txreq);  in verbs_sdma_complete()
     637  list_add_tail(&ps->s_txreq->txreq.list,  in wait_kmem()
     671  &tx->txreq,  in build_verbs_ulp_payload()
     740  &tx->txreq,  in build_verbs_tx_desc()
     754  &tx->txreq,  in build_verbs_tx_desc()
     761  &tx->txreq,  in build_verbs_tx_desc()
     781  ret = sdma_txadd_daddr(sde->dd, &tx->txreq,  in build_verbs_tx_desc()
     823  if (!sdma_txreq_built(&tx->txreq)) {  in hfi1_verbs_send_dma()
     852  ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent);  in hfi1_verbs_send_dma()
     900  list_add_tail(&ps->s_txreq->txreq.list,  in pio_wait()
     [all …]
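The verbs.c hits sketch the whole life cycle: build_verbs_tx_desc() populates the embedded request, hfi1_verbs_send_dma() builds it lazily (the sdma_txreq_built() test at line 823) and submits via sdma_send_txreq(), while wait_kmem() and pio_wait() park the request's list node when resources run out. A sketch of the submit step; build_tx_desc() is a hypothetical stand-in, since the hits truncate build_verbs_tx_desc()'s arguments:

/* Sketch of the path implied by lines 823-852. */
static int send_dma_sketch(struct verbs_txreq *tx, struct hfi1_pkt_state *ps)
{
        /* Build only if not already built, e.g. when the request was
         * queued earlier and is now being resubmitted. */
        if (!sdma_txreq_built(&tx->txreq)) {
                int ret = build_tx_desc(tx);    /* hypothetical stand-in */

                if (ret)
                        return ret;
        }

        /* Submit; on ring exhaustion the wait machinery (cf. the
         * wait_kmem()/pio_wait() hits) queues the request for retry. */
        return sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent);
}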
|
D | ipoib.h |
      57  struct sdma_txreq txreq;  member
|
D | verbs_txreq.c |
      27  sdma_txclean(dd_from_dev(dev), &tx->txreq);  in hfi1_put_txreq()
|
D | user_sdma.h |
     198  struct sdma_txreq txreq;  member
|
D | qp.c |
     118  container_of(tx, struct verbs_txreq, txreq));  in flush_list_head()
     443  struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);  in iowait_sleep()
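qp.c applies the same container_of() recovery in two places: flush_list_head() while draining queued requests, and iowait_sleep() when a send must block. A sketch of what the flush hit implies, assuming the requests are chained through the sdma_txreq's list member as the other hits (e.g. verbs.c line 637) show:

/* Sketch: drain queued requests and return each wrapper to its pool.
 * hfi1_put_txreq() is real (see verbs_txreq.c above); the loop shape
 * is an assumption modeled on the iowait lists. */
static void flush_tx_list_sketch(struct list_head *head)
{
        while (!list_empty(head)) {
                struct sdma_txreq *tx =
                        list_first_entry(head, struct sdma_txreq, list);

                list_del_init(&tx->list);
                hfi1_put_txreq(container_of(tx, struct verbs_txreq, txreq));
        }
}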
|
D | rc.c |
     349  ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;  in make_rc_ack()
|
D | tid_rdma.c |
    5318  ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;  in make_tid_rdma_ack()
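make_rc_ack() and make_tid_rdma_ack() tag their requests identically before submission. The isolated step, as a sketch; reading SDMA_TXREQ_F_VIP as a priority hint for ACK traffic is an inference from the name, not something these hits confirm:

/* Shared step of make_rc_ack() (rc.c:349) and make_tid_rdma_ack()
 * (tid_rdma.c:5318). */
static void mark_ack_txreq(struct hfi1_pkt_state *ps)
{
        ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
}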
|
/linux-6.1.9/drivers/net/xen-netback/ |
D | netback.c |
     882  struct xen_netif_tx_request txreq;  in xenvif_tx_build_gops() local
     909  RING_COPY_REQUEST(&queue->tx, idx, &txreq);  in xenvif_tx_build_gops()
     912  if (txreq.size > queue->remaining_credit &&  in xenvif_tx_build_gops()
     913  tx_credit_exceeded(queue, txreq.size))  in xenvif_tx_build_gops()
     916  queue->remaining_credit -= txreq.size;  in xenvif_tx_build_gops()
     923  if (txreq.flags & XEN_NETTXF_extra_info) {  in xenvif_tx_build_gops()
     938  make_tx_response(queue, &txreq, extra_count,  in xenvif_tx_build_gops()
     952  make_tx_response(queue, &txreq, extra_count,  in xenvif_tx_build_gops()
     958  data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?  in xenvif_tx_build_gops()
     959  XEN_NETBACK_TX_COPY_LEN : txreq.size;  in xenvif_tx_build_gops()
     [all …]
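Unlike the InfiniBand entries above, xen-netback's txreq is a value copied out of a ring shared with the guest, not an embedded request object. The hits show the three steps that matter: snapshot the request with RING_COPY_REQUEST() so later validation cannot be raced by the frontend, apply credit-based rate limiting, and clamp how much is copied directly. A sketch recomposing those hits (loop structure, extra-info handling, and error responses are elided; treating the bytes beyond XEN_NETBACK_TX_COPY_LEN as grant-mapped rather than copied is an assumption):

/* Sketch of the per-request steps at lines 882-959. */
static bool process_one_tx_request(struct xenvif_queue *queue, RING_IDX idx)
{
        struct xen_netif_tx_request txreq;
        unsigned int data_len;

        /* Snapshot: the frontend shares this ring, so validation must
         * run on a local copy it can no longer modify. */
        RING_COPY_REQUEST(&queue->tx, idx, &txreq);

        /* Credit-based rate limiting (lines 912-916). */
        if (txreq.size > queue->remaining_credit &&
            tx_credit_exceeded(queue, txreq.size))
                return false;           /* over budget; retry later */
        queue->remaining_credit -= txreq.size;

        /* Copy at most XEN_NETBACK_TX_COPY_LEN bytes directly
         * (lines 958-959). */
        data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
                   XEN_NETBACK_TX_COPY_LEN : txreq.size;

        (void)data_len;                 /* packet construction elided */
        return true;
}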
|
/linux-6.1.9/drivers/infiniband/hw/qib/ |
D | qib_sdma.c |
     496  tx->txreq.start_idx = 0;  in complete_sdma_err_req()
     497  tx->txreq.next_descq_idx = 0;  in complete_sdma_err_req()
     498  list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);  in complete_sdma_err_req()
     534  if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {  in qib_sdma_verbs_send()
     544  make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);  in qib_sdma_verbs_send()
     547  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)  in qib_sdma_verbs_send()
     563  tx->txreq.start_idx = tail;  in qib_sdma_verbs_send()
     580  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)  in qib_sdma_verbs_send()
     601  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)  in qib_sdma_verbs_send()
     603  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)  in qib_sdma_verbs_send()
     [all …]
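qib's older driver keeps more bookkeeping in the request itself: sg_count is checked against free descriptors before posting, start_idx/next_descq_idx record where the request sits in the descriptor queue, and per-request flags steer descriptor generation. A sketch of the admission and flag handling implied by lines 534-603; the helper calls are hypothetical placeholders and the flag semantics are inferred from their names:

/* Sketch only; use_large_buffers() and request_irq_on_done() are
 * hypothetical stand-ins for the elided descriptor bookkeeping. */
static int qib_sdma_send_sketch(struct qib_pportdata *ppd,
                                struct qib_verbs_txreq *tx, u16 tail)
{
        /* Every mapped fragment consumes one descriptor (line 534). */
        if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd))
                return -EBUSY;          /* assumed: caller defers and retries */

        /* Record where the request begins so completion can account
         * for it (lines 496 and 563). */
        tx->txreq.start_idx = tail;

        if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
                use_large_buffers(tx);          /* hypothetical */
        if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
                request_irq_on_done(tx);        /* hypothetical */

        return 0;
}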
|
D | qib_verbs.c |
     575  tx = list_entry(l, struct qib_verbs_txreq, txreq.list);  in __get_txreq()
     604  tx = list_entry(l, struct qib_verbs_txreq, txreq.list);  in get_txreq()
     627  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {  in qib_put_txreq()
     628  tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;  in qib_put_txreq()
     630  tx->txreq.addr, tx->hdr_dwords << 2,  in qib_put_txreq()
     638  list_add(&tx->txreq.list, &dev->txreq_free);  in qib_put_txreq()
     686  if (qpp->s_tx->txreq.sg_count > avail)  in qib_verbs_sdma_desc_avail()
     688  avail -= qpp->s_tx->txreq.sg_count;  in qib_verbs_sdma_desc_avail()
     714  container_of(cookie, struct qib_verbs_txreq, txreq);  in sdma_complete()
     724  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)  in sdma_complete()
     [all …]
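qib_verbs.c closes the loop with explicit pooling: get_txreq()/__get_txreq() pop recycled requests off dev->txreq_free with list_entry(), and qib_put_txreq() returns them, first unmapping the header buffer when the request owned one (the QIB_SDMA_TXREQ_F_FREEBUF flag). A sketch of the put path built from lines 627-638; the struct device parameter and the DMA direction are assumptions, and locking is omitted:

/* Sketch of the recycle path; hdr_dwords << 2 converts 32-bit words
 * to bytes for the unmap length (cf. line 630). */
static void qib_put_txreq_sketch(struct device *dmadev, struct qib_ibdev *dev,
                                 struct qib_verbs_txreq *tx)
{
        if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
                tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
                dma_unmap_single(dmadev, tx->txreq.addr,
                                 tx->hdr_dwords << 2, DMA_TO_DEVICE);
        }
        /* Back onto the free list for the next get_txreq(). */
        list_add(&tx->txreq.list, &dev->txreq_free);
}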
|
D | qib.h |
     248  struct qib_sdma_txreq txreq;  member
|