/linux-6.1.9/drivers/net/ethernet/broadcom/bnxt/
bnxt_xdp.c
      26  struct bnxt_tx_ring_info *txr,    in bnxt_xmit_bd() argument
      44  prod = txr->tx_prod;    in bnxt_xmit_bd()
      45  tx_buf = &txr->tx_buf_ring[prod];    in bnxt_xmit_bd()
      50  txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];    in bnxt_xmit_bd()
      67  txr->tx_prod = prod;    in bnxt_xmit_bd()
      70  frag_tx_buf = &txr->tx_buf_ring[prod];    in bnxt_xmit_bd()
      73  txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];    in bnxt_xmit_bd()
      97  txr->tx_prod = prod;    in bnxt_xmit_bd()
     102  static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,    in __bnxt_xmit_xdp() argument
     108  tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);    in __bnxt_xmit_xdp()
    [all …]
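The bnxt driver keeps its TX descriptors in an array of ring pages, so a single producer index has to be split into a page selector and an offset inside that page; that is what the TX_RING(prod)/TX_IDX(prod) pair in the lines above does. A minimal userspace sketch of the same split, with made-up sizes (the real constants live in bnxt.h):

#include <stdio.h>

/* Hypothetical layout: 16 descriptors per ring page, 4 pages. */
#define DESC_PER_PAGE   16
#define NR_PAGES        4
#define RING_MASK       (DESC_PER_PAGE * NR_PAGES - 1)

/* Split a free-running producer index into (page, index-in-page),
 * mirroring what TX_RING(prod) and TX_IDX(prod) do in the driver. */
#define RING_PAGE(prod) (((prod) & RING_MASK) / DESC_PER_PAGE)
#define RING_IDX(prod)  ((prod) & (DESC_PER_PAGE - 1))

int main(void)
{
	unsigned int prod;

	for (prod = 0; prod < 70; prod++)	/* walks past the wrap at 64 */
		printf("prod=%3u -> page=%u idx=%2u\n",
		       prod, RING_PAGE(prod), RING_IDX(prod));
	return 0;
}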
bnxt.c
     328  static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,    in bnxt_txr_db_kick() argument
     331  bnxt_db_write(bp, &txr->tx_db, prod);    in bnxt_txr_db_kick()
     332  txr->kick_pending = 0;    in bnxt_txr_db_kick()
     336  struct bnxt_tx_ring_info *txr,    in bnxt_txr_netif_try_stop_queue() argument
     347  if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {    in bnxt_txr_netif_try_stop_queue()
     367  struct bnxt_tx_ring_info *txr;    in bnxt_start_xmit() local
     379  txr = &bp->tx_ring[bp->tx_ring_map[i]];    in bnxt_start_xmit()
     380  prod = txr->tx_prod;    in bnxt_start_xmit()
     382  free_size = bnxt_tx_avail(bp, txr);    in bnxt_start_xmit()
     385  if (net_ratelimit() && txr->kick_pending)    in bnxt_start_xmit()
    [all …]
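bnxt_txr_netif_try_stop_queue() is the usual "stop, then re-check" guard on the transmit path: stop the queue when descriptors run low, then re-read the free count so a completion that raced with the stop can wake the queue straight away. A hedged, driver-agnostic sketch of that pattern; my_tx_avail() and MY_WAKE_THRESH are placeholders, not bnxt symbols:

#include <linux/netdevice.h>

/* Placeholder for however the driver counts free TX descriptors. */
u32 my_tx_avail(void);

#define MY_WAKE_THRESH	32

/* Returns true if the caller really has to stop transmitting. */
static bool my_try_stop_queue(struct netdev_queue *txq)
{
	netif_tx_stop_queue(txq);

	/* Publish the stopped state before re-reading the free count,
	 * so a completion racing with us cannot be missed. */
	smp_mb();

	if (my_tx_avail() >= MY_WAKE_THRESH) {
		netif_tx_wake_queue(txq);
		return false;
	}
	return true;
}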
bnxt_xdp.h
      16  struct bnxt_tx_ring_info *txr,
bnxt.h
    2219  static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)    in bnxt_tx_avail() argument
    2225  ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);    in bnxt_tx_avail()
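bnxt_tx_avail() relies on the standard power-of-two ring arithmetic: with free-running producer/consumer indices, (tx_prod - tx_cons) & tx_ring_mask gives the number of in-flight descriptors even after the counters wrap, and the free count is the ring size minus that. A tiny self-contained illustration (ring size and index values are made up):

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE	256			/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1)

/* Free descriptors: ring size minus in-flight, as in bnxt_tx_avail(). */
static unsigned int tx_avail(uint16_t prod, uint16_t cons)
{
	return RING_SIZE - ((prod - cons) & RING_MASK);
}

int main(void)
{
	/* The modular subtraction works even across index wraparound. */
	printf("%u\n", tx_avail(10, 5));		/* 251 */
	printf("%u\n", tx_avail(65534, 65530));		/* 252 */
	printf("%u\n", tx_avail(3, 65533));		/* 250: prod wrapped past cons */
	return 0;
}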
bnxt_ethtool.c
    3523  struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];    in bnxt_run_loopback() local
    3553  bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);    in bnxt_run_loopback()
    3558  bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);    in bnxt_run_loopback()
/linux-6.1.9/drivers/net/ethernet/qualcomm/
qca_spi.c
     287  if (qca->txr.skb[qca->txr.head] == NULL)    in qcaspi_transmit()
     300  while (qca->txr.skb[qca->txr.head]) {    in qcaspi_transmit()
     301  pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;    in qcaspi_transmit()
     309  if (qcaspi_tx_frame(qca, qca->txr.skb[qca->txr.head]) == -1) {    in qcaspi_transmit()
     316  n_stats->tx_bytes += qca->txr.skb[qca->txr.head]->len;    in qcaspi_transmit()
     324  dev_kfree_skb(qca->txr.skb[qca->txr.head]);    in qcaspi_transmit()
     325  qca->txr.skb[qca->txr.head] = NULL;    in qcaspi_transmit()
     326  qca->txr.size -= pkt_len;    in qcaspi_transmit()
     327  new_head = qca->txr.head + 1;    in qcaspi_transmit()
     328  if (new_head >= qca->txr.count)    in qcaspi_transmit()
    [all …]
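qcaspi_transmit() drains a plain head/tail ring of queued sk_buffs: as long as the head slot holds an skb, it pushes the frame over SPI, accounts the bytes, frees the skb, and advances head with wraparound. A compact userspace model of that drain loop; the pkt type and tx_frame() stub stand in for the real skb and SPI plumbing:

#include <stdio.h>
#include <stdlib.h>

#define RING_COUNT	8

struct pkt {
	int len;
};

struct tx_ring {
	struct pkt *skb[RING_COUNT];	/* slots, NULL = empty */
	int head;			/* next packet to send */
	int tail;			/* next free slot */
	int size;			/* queued bytes */
};

/* Stand-in for qcaspi_tx_frame(): pretend the transfer always succeeds. */
static int tx_frame(struct pkt *p)
{
	printf("sent %d bytes\n", p->len);
	return 0;
}

/* Mirrors the shape of qcaspi_transmit(): drain from head until empty. */
static void transmit(struct tx_ring *r)
{
	while (r->skb[r->head]) {
		struct pkt *p = r->skb[r->head];

		if (tx_frame(p) == -1)
			return;			/* retry later */

		r->size -= p->len;
		free(p);
		r->skb[r->head] = NULL;
		if (++r->head >= RING_COUNT)
			r->head = 0;
	}
}

int main(void)
{
	struct tx_ring r = { .head = 0, .tail = 0, .size = 0 };
	int i;

	for (i = 0; i < 3; i++) {		/* queue three packets */
		struct pkt *p = malloc(sizeof(*p));

		if (!p)
			break;
		p->len = 100 + i;
		r.skb[r.tail] = p;
		r.size += p->len;
		if (++r.tail >= RING_COUNT)
			r.tail = 0;
	}
	transmit(&r);
	return 0;
}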
qca_debug.c
      80  if (qca->txr.skb[qca->txr.head] == NULL)    in qcaspi_info_show()
      82  else if (qca->txr.skb[qca->txr.tail])    in qcaspi_info_show()
      90  qca->txr.size);    in qcaspi_info_show()
     258  ring->tx_pending = qca->txr.count;    in qcaspi_get_ringparam()
     277  qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);    in qcaspi_set_ringparam()
     278  qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);    in qcaspi_set_ringparam()
qca_spi.h
      86  struct tx_ring txr;    member
/linux-6.1.9/drivers/net/ethernet/freescale/enetc/
enetc.c
    1737  static int enetc_alloc_txbdr(struct enetc_bdr *txr)    in enetc_alloc_txbdr() argument
    1741  txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));    in enetc_alloc_txbdr()
    1742  if (!txr->tx_swbd)    in enetc_alloc_txbdr()
    1745  err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));    in enetc_alloc_txbdr()
    1749  txr->tso_headers = dma_alloc_coherent(txr->dev,    in enetc_alloc_txbdr()
    1750  txr->bd_count * TSO_HEADER_SIZE,    in enetc_alloc_txbdr()
    1751  &txr->tso_headers_dma,    in enetc_alloc_txbdr()
    1753  if (!txr->tso_headers) {    in enetc_alloc_txbdr()
    1758  txr->next_to_clean = 0;    in enetc_alloc_txbdr()
    1759  txr->next_to_use = 0;    in enetc_alloc_txbdr()
    [all …]
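enetc_alloc_txbdr() shows the usual multi-step ring setup: vzalloc() the per-descriptor software state, DMA-allocate the hardware descriptor ring, then dma_alloc_coherent() a TSO header area, unwinding in reverse order when a step fails. A hedged kernel-style sketch of that error-unwinding shape, reduced to two steps; my_ring, MY_HDR_SIZE and the field names are illustrative, not enetc symbols:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>

struct my_ring {
	struct device *dev;
	int bd_count;
	u64 *swbd;			/* per-descriptor software state */
	void *hdr_mem;			/* coherent header area */
	dma_addr_t hdr_dma;
};

#define MY_HDR_SIZE	256

static int my_alloc_tx_ring(struct my_ring *r)
{
	r->swbd = vzalloc(r->bd_count * sizeof(*r->swbd));
	if (!r->swbd)
		return -ENOMEM;

	/* (the real function also allocates the hardware BD ring here) */

	r->hdr_mem = dma_alloc_coherent(r->dev, r->bd_count * MY_HDR_SIZE,
					&r->hdr_dma, GFP_KERNEL);
	if (!r->hdr_mem)
		goto err_free_swbd;

	return 0;

err_free_swbd:
	vfree(r->swbd);
	r->swbd = NULL;
	return -ENOMEM;
}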
/linux-6.1.9/drivers/net/ethernet/broadcom/
bnx2.c
     247  static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)    in bnx2_tx_avail() argument
     254  diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);    in bnx2_tx_avail()
     697  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;    in bnx2_free_tx_mem() local
     699  if (txr->tx_desc_ring) {    in bnx2_free_tx_mem()
     701  txr->tx_desc_ring,    in bnx2_free_tx_mem()
     702  txr->tx_desc_mapping);    in bnx2_free_tx_mem()
     703  txr->tx_desc_ring = NULL;    in bnx2_free_tx_mem()
     705  kfree(txr->tx_buf_ring);    in bnx2_free_tx_mem()
     706  txr->tx_buf_ring = NULL;    in bnx2_free_tx_mem()
     749  struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;    in bnx2_alloc_tx_mem() local
    [all …]
bcmsysport.c
    1131  struct bcm_sysport_tx_ring *txr;    in bcm_sysport_rx_isr() local
    1166  txr = &priv->tx_rings[ring];    in bcm_sysport_rx_isr()
    1168  if (likely(napi_schedule_prep(&txr->napi))) {    in bcm_sysport_rx_isr()
    1170  __napi_schedule(&txr->napi);    in bcm_sysport_rx_isr()
    1182  struct bcm_sysport_tx_ring *txr;    in bcm_sysport_tx_isr() local
    1198  txr = &priv->tx_rings[ring];    in bcm_sysport_tx_isr()
    1200  if (likely(napi_schedule_prep(&txr->napi))) {    in bcm_sysport_tx_isr()
    1202  __napi_schedule_irqoff(&txr->napi);    in bcm_sysport_tx_isr()
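Both bcm_sysport interrupt handlers hand the real work off to NAPI: figure out which TX ring raised the interrupt, then napi_schedule_prep()/__napi_schedule_irqoff() so reclaim runs in softirq context instead of the hard IRQ handler. A hedged sketch of that hand-off; my_priv, my_ring and the status helper are placeholders:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

#define MY_NR_TX_RINGS	4

struct my_ring {
	struct napi_struct napi;
	/* ... per-ring state ... */
};

struct my_priv {
	struct my_ring tx_rings[MY_NR_TX_RINGS];
};

/* Assumed helper: read and acknowledge the per-ring TX interrupt causes. */
u32 my_read_tx_irq_status(struct my_priv *priv);

static irqreturn_t my_tx_isr(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;
	u32 status = my_read_tx_irq_status(priv);
	unsigned int ring;

	for (ring = 0; ring < MY_NR_TX_RINGS; ring++) {
		if (!(status & BIT(ring)))
			continue;

		/* Defer the actual TX reclaim to NAPI (softirq) context. */
		if (likely(napi_schedule_prep(&priv->tx_rings[ring].napi)))
			__napi_schedule_irqoff(&priv->tx_rings[ring].napi);
	}

	return IRQ_HANDLED;
}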
/linux-6.1.9/drivers/net/ethernet/sgi/
ioc3-eth.c
      86  struct ioc3_etxd *txr;    member
     615  desc = &ip->txr[entry];    in ioc3_tx_unmap()
     642  ip->txr[i].cmd = 0;    in ioc3_clean_tx_ring()
     902  ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K);    in ioc3eth_probe()
    1041  desc = &ip->txr[produce];    in ioc3_start_xmit()
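ioc3 keeps both the raw allocation (ip->tx_ring) and an aligned view of it (ip->txr) because the hardware wants the TX descriptor ring on a 16 KiB boundary, so the driver rounds the pointer up with PTR_ALIGN(). A small hedged sketch of the over-allocate-and-align idea; the allocation call, size and descriptor layout are made up, not taken from ioc3-eth.c:

#include <linux/slab.h>
#include <linux/kernel.h>	/* PTR_ALIGN() */
#include <linux/sizes.h>	/* SZ_16K */

struct my_desc {
	u64 cmd;
	u64 addr;
};

#define MY_RING_BYTES	SZ_16K

/* Over-allocate, then round the working pointer up to a 16 KiB
 * boundary; *raw keeps the original pointer for kfree() later. */
static struct my_desc *my_alloc_aligned_ring(void **raw)
{
	*raw = kzalloc(MY_RING_BYTES + SZ_16K - 1, GFP_KERNEL);
	if (!*raw)
		return NULL;

	return PTR_ALIGN((struct my_desc *)*raw, SZ_16K);
}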
/linux-6.1.9/drivers/net/ethernet/amazon/ena/
ena_netdev.c
     692  struct ena_ring *txr, *rxr;    in ena_init_io_rings() local
     698  txr = &adapter->tx_ring[i];    in ena_init_io_rings()
     702  ena_init_io_rings_common(adapter, txr, i);    in ena_init_io_rings()
     705  txr->ring_size = adapter->requested_tx_ring_size;    in ena_init_io_rings()
     706  txr->tx_max_header_size = ena_dev->tx_max_header_size;    in ena_init_io_rings()
     707  txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;    in ena_init_io_rings()
     708  txr->sgl_size = adapter->max_tx_sgl_size;    in ena_init_io_rings()
     709  txr->smoothed_interval =    in ena_init_io_rings()
     711  txr->disable_meta_caching = adapter->disable_meta_caching;    in ena_init_io_rings()
     712  spin_lock_init(&txr->xdp_tx_lock);    in ena_init_io_rings()
/linux-6.1.9/tools/testing/selftests/bpf/
xskxceiver.c
     320  struct xsk_ring_prod *txr;    in __xsk_configure_socket() local
     331  txr = ifobject->tx_on ? &xsk->tx : NULL;    in __xsk_configure_socket()
     333  return xsk_socket__create(&xsk->xsk, ifobject->ifname, 0, umem->umem, rxr, txr, &cfg);    in __xsk_configure_socket()
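In the selftest, txr is either the socket's TX producer ring or NULL depending on ifobject->tx_on; xsk_socket__create() accepts NULL for a ring the caller does not need. A hedged userspace sketch of that call using the xsk.h helpers the selftests ship; the wrapper, queue id and config values are assumptions, not code from xskxceiver.c:

#include <stdbool.h>
#include "xsk.h"	/* selftest-local copy; libxdp installs <xdp/xsk.h> */

/* Create an AF_XDP socket on queue 0, passing NULL for the TX ring
 * when this end only receives, as __xsk_configure_socket() does. */
static struct xsk_socket *make_socket(const char *ifname,
				      struct xsk_umem *umem,
				      struct xsk_ring_cons *rx,
				      struct xsk_ring_prod *tx,
				      bool want_tx)
{
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
	};
	struct xsk_socket *xsk = NULL;

	if (xsk_socket__create(&xsk, ifname, 0 /* queue */, umem,
			       rx, want_tx ? tx : NULL, &cfg))
		return NULL;

	return xsk;
}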