/linux-6.6.21/drivers/net/ethernet/broadcom/bnxt/
bnxt_xdp.c
    26  struct bnxt_tx_ring_info *txr,  in bnxt_xmit_bd() argument
    44  prod = txr->tx_prod;  in bnxt_xmit_bd()
    45  tx_buf = &txr->tx_buf_ring[prod];  in bnxt_xmit_bd()
    50  txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];  in bnxt_xmit_bd()
    66  WRITE_ONCE(txr->tx_prod, prod);  in bnxt_xmit_bd()
    69  frag_tx_buf = &txr->tx_buf_ring[prod];  in bnxt_xmit_bd()
    72  txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];  in bnxt_xmit_bd()
    90  WRITE_ONCE(txr->tx_prod, prod);  in bnxt_xmit_bd()
    95  static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,  in __bnxt_xmit_xdp() argument
   101  tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);  in __bnxt_xmit_xdp()
    [all …]
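The bnxt_xmit_bd() excerpts above show the driver's TX producer pattern: look up the software tx_buf and the hardware descriptor through the paged TX_RING()/TX_IDX() indexing, fill them, then publish the new producer with WRITE_ONCE() before the doorbell is rung. The stand-alone C sketch below models only that two-level indexing and producer advance; the macro definitions, ring sizes, and struct tx_desc layout here are simplified assumptions, not the bnxt definitions.

/* Simplified model of the paged TX ring indexing seen in bnxt_xmit_bd():
 * descriptors live in fixed-size pages, so the producer index is split
 * into a page number (TX_RING) and an offset inside the page (TX_IDX).
 * Sizes and names are illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define TX_DESC_CNT   256                     /* descriptors per page (assumed) */
#define TX_PAGES      4
#define RING_SIZE     (TX_DESC_CNT * TX_PAGES)
#define TX_RING(prod) (((prod) & (RING_SIZE - 1)) / TX_DESC_CNT)
#define TX_IDX(prod)  ((prod) & (TX_DESC_CNT - 1))

struct tx_desc { uint64_t addr; uint32_t len_flags; };

static struct tx_desc tx_desc_ring[TX_PAGES][TX_DESC_CNT];

int main(void)
{
	uint16_t prod = 0;

	/* Post two buffers, advancing the software producer after each one.
	 * The real driver publishes prod with WRITE_ONCE() and only rings
	 * the doorbell once the descriptors are filled in. */
	for (int i = 0; i < 2; i++) {
		struct tx_desc *txbd = &tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		txbd->addr = 0x1000u + (uint64_t)i * 0x800u;   /* fake DMA address */
		txbd->len_flags = 64;
		prod++;
	}
	printf("producer now %u (page %u, index %u)\n",
	       prod, (unsigned int)TX_RING(prod), (unsigned int)TX_IDX(prod));
	return 0;
}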
bnxt.c
   334  void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,  in bnxt_sched_reset_txr() argument
   337  struct bnxt_napi *bnapi = txr->bnapi;  in bnxt_sched_reset_txr()
   343  txr->txq_index, bnapi->tx_pkts,  in bnxt_sched_reset_txr()
   344  txr->tx_cons, txr->tx_prod, idx);  in bnxt_sched_reset_txr()
   382  static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,  in bnxt_txr_db_kick() argument
   385  bnxt_db_write(bp, &txr->tx_db, prod);  in bnxt_txr_db_kick()
   386  txr->kick_pending = 0;  in bnxt_txr_db_kick()
   401  struct bnxt_tx_ring_info *txr;  in bnxt_start_xmit() local
   413  txr = &bp->tx_ring[bp->tx_ring_map[i]];  in bnxt_start_xmit()
   414  prod = txr->tx_prod;  in bnxt_start_xmit()
    [all …]
bnxt_xdp.h
    16  struct bnxt_tx_ring_info *txr,
bnxt.h
  2258  const struct bnxt_tx_ring_info *txr)  in bnxt_tx_avail() argument
  2260  u32 used = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);  in bnxt_tx_avail()
  2355  void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
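bnxt_tx_avail() (bnxt.h:2258 above) and bnx2_tx_avail() (bnx2.c:246 further down) both compute in-flight descriptors as the free-running difference prod - cons, read with READ_ONCE(); unsigned wraparound keeps the subtraction correct after the indices roll over. A minimal sketch of that arithmetic follows, with ring_size as an assumed parameter (the real drivers derive it from their ring structures and apply their own adjustments):

#include <assert.h>
#include <stdint.h>

/* Wraparound-safe "descriptors in flight" and "descriptors free":
 * prod and cons are free-running 16-bit counters, so the unsigned
 * difference stays valid across rollover. */
static uint16_t tx_avail(uint16_t prod, uint16_t cons, uint16_t ring_size)
{
	uint16_t used = (uint16_t)(prod - cons);

	return (uint16_t)(ring_size - used);
}

int main(void)
{
	/* Producer has wrapped past zero while the consumer has not:
	 * 5 - 65530 wraps to 11 in-flight descriptors. */
	assert(tx_avail(5, 65530, 512) == 501);
	assert(tx_avail(100, 100, 512) == 512);
	return 0;
}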
bnxt_ethtool.c
  3622  struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];  in bnxt_run_loopback() local
  3652  bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);  in bnxt_run_loopback()
  3657  bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);  in bnxt_run_loopback()
/linux-6.6.21/drivers/net/ethernet/qualcomm/
qca_spi.c
   286  if (qca->txr.skb[qca->txr.head] == NULL)  in qcaspi_transmit()
   299  while (qca->txr.skb[qca->txr.head]) {  in qcaspi_transmit()
   300  pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;  in qcaspi_transmit()
   308  if (qcaspi_tx_frame(qca, qca->txr.skb[qca->txr.head]) == -1) {  in qcaspi_transmit()
   315  n_stats->tx_bytes += qca->txr.skb[qca->txr.head]->len;  in qcaspi_transmit()
   323  dev_kfree_skb(qca->txr.skb[qca->txr.head]);  in qcaspi_transmit()
   324  qca->txr.skb[qca->txr.head] = NULL;  in qcaspi_transmit()
   325  qca->txr.size -= pkt_len;  in qcaspi_transmit()
   326  new_head = qca->txr.head + 1;  in qcaspi_transmit()
   327  if (new_head >= qca->txr.count)  in qcaspi_transmit()
    [all …]
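The qcaspi_transmit() lines sketch the tx_ring drain loop: packets are sent from txr.head until an empty slot is reached, the byte budget txr.size is reduced, and head wraps at txr.count. Below is a small user-space model of just that bookkeeping; struct tx_ring_model, the fixed 64-byte packet length, and the printf stand in for the driver's sk_buff handling and SPI transfer, so treat it as an illustration rather than the driver's code.

#include <stdio.h>

#define RING_LEN 8

/* Stand-in for the driver's struct tx_ring (skb array + head/tail/count/size). */
struct tx_ring_model {
	const char *skb[RING_LEN];   /* queued packets (names instead of sk_buffs) */
	unsigned int head;
	unsigned int tail;
	unsigned int count;          /* number of slots */
	unsigned int size;           /* queued bytes */
};

static void drain(struct tx_ring_model *txr)
{
	while (txr->skb[txr->head]) {
		unsigned int new_head;

		printf("transmit %s\n", txr->skb[txr->head]);   /* qcaspi_tx_frame() here */
		txr->skb[txr->head] = NULL;                     /* dev_kfree_skb() here */
		txr->size -= 64;                                /* pkt_len, assumed constant */

		new_head = txr->head + 1;
		if (new_head >= txr->count)                     /* wrap, as at qca_spi.c:327 */
			new_head = 0;
		txr->head = new_head;
	}
}

int main(void)
{
	struct tx_ring_model txr = {
		.skb = { "pkt0", "pkt1" },
		.count = RING_LEN,
		.size = 128,
	};

	drain(&txr);
	printf("head=%u size=%u\n", txr.head, txr.size);
	return 0;
}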
qca_debug.c
    82  if (qca->txr.skb[qca->txr.head] == NULL)  in qcaspi_info_show()
    84  else if (qca->txr.skb[qca->txr.tail])  in qcaspi_info_show()
    92  qca->txr.size);  in qcaspi_info_show()
   260  ring->tx_pending = qca->txr.count;  in qcaspi_get_ringparam()
   278  qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);  in qcaspi_set_ringparam()
   279  qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);  in qcaspi_set_ringparam()
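qcaspi_set_ringparam() (lines 278-279) clamps the requested tx_pending into [TX_RING_MIN_LEN, TX_RING_MAX_LEN] with max_t()/min_t(). The same effect in plain C, with placeholder bounds since the driver's actual limits are not shown above:

#include <stdio.h>

/* Placeholder bounds; the real values live in the qca_spi driver sources. */
#define TX_RING_MIN_LEN 4u
#define TX_RING_MAX_LEN 64u

/* Equivalent of: count = max_t(...); count = min_t(...); */
static unsigned int clamp_tx_pending(unsigned int requested)
{
	unsigned int count = requested;

	if (count < TX_RING_MIN_LEN)
		count = TX_RING_MIN_LEN;
	if (count > TX_RING_MAX_LEN)
		count = TX_RING_MAX_LEN;
	return count;
}

int main(void)
{
	printf("%u %u %u\n",
	       clamp_tx_pending(1), clamp_tx_pending(16), clamp_tx_pending(1000));
	return 0;
}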
qca_spi.h
    86  struct tx_ring txr;  member
/linux-6.6.21/drivers/net/ethernet/broadcom/
bnx2.c
   246  static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)  in bnx2_tx_avail() argument
   253  diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);  in bnx2_tx_avail()
   696  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;  in bnx2_free_tx_mem() local
   698  if (txr->tx_desc_ring) {  in bnx2_free_tx_mem()
   700  txr->tx_desc_ring,  in bnx2_free_tx_mem()
   701  txr->tx_desc_mapping);  in bnx2_free_tx_mem()
   702  txr->tx_desc_ring = NULL;  in bnx2_free_tx_mem()
   704  kfree(txr->tx_buf_ring);  in bnx2_free_tx_mem()
   705  txr->tx_buf_ring = NULL;  in bnx2_free_tx_mem()
   748  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;  in bnx2_alloc_tx_mem() local
    [all …]
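bnx2_free_tx_mem() above releases the two halves of a TX ring separately: the software tx_buf_ring with kfree() and the DMA-coherent descriptor ring through the call whose arguments (tx_desc_ring, tx_desc_mapping) appear at lines 700-701. A hedged kernel-style sketch of that alloc/free pairing follows; struct txr_model and the size parameters are simplified stand-ins, not the bnx2 structures or code.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Simplified stand-in for struct bnx2_tx_ring_info. */
struct txr_model {
	void *tx_buf_ring;          /* per-descriptor software state (kmalloc) */
	void *tx_desc_ring;         /* hardware-visible descriptors (coherent DMA) */
	dma_addr_t tx_desc_mapping;
};

static int txr_model_alloc(struct device *dev, struct txr_model *txr,
			   size_t sw_size, size_t hw_size)
{
	txr->tx_buf_ring = kzalloc(sw_size, GFP_KERNEL);
	if (!txr->tx_buf_ring)
		return -ENOMEM;

	txr->tx_desc_ring = dma_alloc_coherent(dev, hw_size,
					       &txr->tx_desc_mapping, GFP_KERNEL);
	if (!txr->tx_desc_ring) {
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void txr_model_free(struct device *dev, struct txr_model *txr,
			   size_t hw_size)
{
	/* Free the coherent descriptor memory with the same size and DMA
	 * handle it was allocated with, then the plain kernel memory. */
	if (txr->tx_desc_ring) {
		dma_free_coherent(dev, hw_size, txr->tx_desc_ring,
				  txr->tx_desc_mapping);
		txr->tx_desc_ring = NULL;
	}
	kfree(txr->tx_buf_ring);
	txr->tx_buf_ring = NULL;
}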
bcmsysport.c
  1142  struct bcm_sysport_tx_ring *txr;  in bcm_sysport_rx_isr() local
  1177  txr = &priv->tx_rings[ring];  in bcm_sysport_rx_isr()
  1179  if (likely(napi_schedule_prep(&txr->napi))) {  in bcm_sysport_rx_isr()
  1181  __napi_schedule(&txr->napi);  in bcm_sysport_rx_isr()
  1193  struct bcm_sysport_tx_ring *txr;  in bcm_sysport_tx_isr() local
  1209  txr = &priv->tx_rings[ring];  in bcm_sysport_tx_isr()
  1211  if (likely(napi_schedule_prep(&txr->napi))) {  in bcm_sysport_tx_isr()
  1213  __napi_schedule_irqoff(&txr->napi);  in bcm_sysport_tx_isr()
/linux-6.6.21/drivers/net/ethernet/sgi/
ioc3-eth.c
    86  struct ioc3_etxd *txr;  member
   615  desc = &ip->txr[entry];  in ioc3_tx_unmap()
   642  ip->txr[i].cmd = 0;  in ioc3_clean_tx_ring()
   902  ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K);  in ioc3eth_probe()
  1041  desc = &ip->txr[produce];  in ioc3_start_xmit()
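At ioc3eth_probe() line 902, the TX descriptor pointer txr is obtained by aligning the raw tx_ring allocation up to a 16 KiB boundary with PTR_ALIGN(). A stand-alone model of that align-up step; ptr_align() here mimics the kernel macro, and the over-allocation factor is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SZ_16K 0x4000u

/* Round a pointer up to the next multiple of 'align' (a power of two),
 * mirroring what PTR_ALIGN() does in the kernel. */
static void *ptr_align(void *p, uintptr_t align)
{
	return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
	/* Over-allocate so a fully aligned 16 KiB region is guaranteed to fit. */
	void *raw = malloc(SZ_16K * 2);
	void *txr;

	if (!raw)
		return 1;
	txr = ptr_align(raw, SZ_16K);
	printf("raw=%p aligned=%p\n", raw, txr);
	free(raw);
	return 0;
}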
/linux-6.6.21/drivers/net/ethernet/amazon/ena/
ena_netdev.c
   695  struct ena_ring *txr, *rxr;  in ena_init_io_rings() local
   701  txr = &adapter->tx_ring[i];  in ena_init_io_rings()
   705  ena_init_io_rings_common(adapter, txr, i);  in ena_init_io_rings()
   708  txr->ring_size = adapter->requested_tx_ring_size;  in ena_init_io_rings()
   709  txr->tx_max_header_size = ena_dev->tx_max_header_size;  in ena_init_io_rings()
   710  txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;  in ena_init_io_rings()
   711  txr->sgl_size = adapter->max_tx_sgl_size;  in ena_init_io_rings()
   712  txr->smoothed_interval =  in ena_init_io_rings()
   714  txr->disable_meta_caching = adapter->disable_meta_caching;  in ena_init_io_rings()
   715  spin_lock_init(&txr->xdp_tx_lock);  in ena_init_io_rings()
    [all …]
/linux-6.6.21/tools/testing/selftests/bpf/
xskxceiver.c
   250  struct xsk_ring_prod *txr;  in __xsk_configure_socket() local
   261  txr = ifobject->tx_on ? &xsk->tx : NULL;  in __xsk_configure_socket()
   263  return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg);  in __xsk_configure_socket()