/linux-6.6.21/drivers/net/ethernet/intel/ice/ |
D | ice_xsk.c |
    189  struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];  in ice_qp_dis() local
    192  ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);  in ice_qp_dis()
    193  err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,  in ice_qp_dis()
    246  struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];  in ice_qp_ena() local
    250  err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);  in ice_qp_ena()
    253  ice_set_ring_xdp(xdp_ring);  in ice_qp_ena()
    425  napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);  in ice_xsk_pool_setup()
    616  static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)  in ice_clean_xdp_irq_zc() argument
    618  u16 ntc = xdp_ring->next_to_clean;  in ice_clean_xdp_irq_zc()
    620  u16 cnt = xdp_ring->count;  in ice_clean_xdp_irq_zc()
    [all …]
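The last three hits (616-620) open ice_clean_xdp_irq_zc(), the zero-copy TX completion routine, which walks the ring from next_to_clean and wraps at count. A minimal sketch of that index walk, using invented names rather than the driver's types:

    /* Ring walk from next_to_clean, wrapping at count; illustrative only. */
    struct toy_ring {
            unsigned short next_to_clean;
            unsigned short count;            /* number of descriptors */
    };

    static unsigned int toy_clean(struct toy_ring *r, unsigned int done)
    {
            unsigned short ntc = r->next_to_clean;
            unsigned int cleaned = 0;

            while (cleaned < done) {
                    /* release the buffer at slot ntc here */
                    cleaned++;
                    if (++ntc == r->count)   /* wrap back to slot 0 */
                            ntc = 0;
            }
            r->next_to_clean = ntc;
            return cleaned;
    }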
|
D | ice_txrx_lib.c |
    253  static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)  in ice_clean_xdp_irq() argument
    256  struct device *dev = xdp_ring->dev;  in ice_clean_xdp_irq()
    257  u32 ntc = xdp_ring->next_to_clean;  in ice_clean_xdp_irq()
    259  u32 cnt = xdp_ring->count;  in ice_clean_xdp_irq()
    266  idx = xdp_ring->tx_buf[ntc].rs_idx;  in ice_clean_xdp_irq()
    267  tx_desc = ICE_TX_DESC(xdp_ring, idx);  in ice_clean_xdp_irq()
    284  struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];  in ice_clean_xdp_irq()
    300  tx_buf = &xdp_ring->tx_buf[ntc];  in ice_clean_xdp_irq()
    315  xdp_ring->next_to_clean = ntc;  in ice_clean_xdp_irq()
    316  xdp_ring->xdp_tx_active -= xdp_tx;  in ice_clean_xdp_irq()
    [all …]
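Lines 266-267 show the batch-completion trick: the tx_buf slot at next_to_clean remembers which descriptor carried the RS (report status) bit, and once the hardware marks that descriptor done, every buffer up to it can be freed in one pass. A sketch of that shape, with invented types and an invented done-bit test:

    /* RS-based batch completion; all names and the DONE bit are illustrative. */
    struct toy_desc { unsigned long long cmd; };
    struct toy_buf  { unsigned short rs_idx; void *page; };
    #define TOY_DESC_DONE 0x1ULL

    static void toy_clean_irq(struct toy_desc *descs, struct toy_buf *bufs,
                              unsigned short *ntc, unsigned short count)
    {
            unsigned short idx = bufs[*ntc].rs_idx;

            if (!(descs[idx].cmd & TOY_DESC_DONE))
                    return;                  /* batch not finished yet */

            /* free every slot from *ntc through idx, inclusive */
            while (*ntc != (unsigned short)((idx + 1) % count)) {
                    /* dma unmap + page free would happen here */
                    bufs[*ntc].page = NULL;
                    *ntc = (*ntc + 1) % count;
            }
    }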
|
D | ice_txrx_lib.h |
    129  static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)  in ice_xdp_ring_update_tail() argument
    135  writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);  in ice_xdp_ring_update_tail()
    144  static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)  in ice_set_rs_bit() argument
    146  u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;  in ice_set_rs_bit()
    149  tx_desc = ICE_TX_DESC(xdp_ring, rs_idx);  in ice_set_rs_bit()
    156  void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
    157  int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
    158  int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
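Two small helpers are visible here: ice_xdp_ring_update_tail() is a relaxed MMIO store of next_to_use to the tail doorbell (the caller provides the ordering), and ice_set_rs_bit() picks the last descriptor actually used, which is next_to_use - 1, wrapping to count - 1 when next_to_use is 0. A sketch of both, assuming a prior write barrier on the caller's side (needs <linux/io.h>; layout invented):

    /* Last-used slot with wraparound, plus the doorbell write.
     * Assumes the caller already ordered its descriptor stores.
     */
    static inline u32 toy_last_used(u32 next_to_use, u32 count)
    {
            return next_to_use ? next_to_use - 1 : count - 1;
    }

    static inline void toy_update_tail(void __iomem *tail, u32 next_to_use)
    {
            writel_relaxed(next_to_use, tail);
    }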
|
D | ice_xsk.h |
    28  void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
    29  bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
    32  static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)  in ice_xmit_zc() argument
    72  static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }  in ice_xsk_clean_xdp_ring() argument
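Note the split: lines 28-29 are the real prototypes built when AF_XDP support is enabled, while 32 and 72 are the no-op static inline stubs used otherwise, so call sites stay free of #ifdefs. The general shape of that pattern, with an invented ring type:

    /* Config-gated stub pattern; my_ring is illustrative. */
    #ifdef CONFIG_XDP_SOCKETS
    bool my_xmit_zc(struct my_ring *ring);
    void my_clean_xdp_ring(struct my_ring *ring);
    #else
    static inline bool my_xmit_zc(struct my_ring __always_unused *ring)
    {
            return true;             /* report "nothing pending" */
    }
    static inline void my_clean_xdp_ring(struct my_ring *ring) { }
    #endif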
|
D | ice_txrx.c |
    560  struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,  in ice_run_xdp() argument
    575  spin_lock(&xdp_ring->tx_lock);  in ice_run_xdp()
    576  ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);  in ice_run_xdp()
    578  spin_unlock(&xdp_ring->tx_lock);  in ice_run_xdp()
    607  struct ice_tx_ring *xdp_ring)  in ice_xmit_xdp_ring() argument
    617  return __ice_xmit_xdp_ring(&xdp, xdp_ring, true);  in ice_xmit_xdp_ring()
    639  struct ice_tx_ring *xdp_ring;  in ice_xdp_xmit() local
    654  xdp_ring = vsi->xdp_rings[queue_index];  in ice_xdp_xmit()
    655  spin_lock(&xdp_ring->tx_lock);  in ice_xdp_xmit()
    660  xdp_ring = vsi->xdp_rings[queue_index];  in ice_xdp_xmit()
    [all …]
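The spin_lock/spin_unlock pair around __ice_xmit_xdp_ring() (575-578) exists because one XDP TX ring can be shared by several CPUs when the machine has more cores than queues. A sketch of that conditional locking, names invented:

    /* Per-ring TX lock taken only when rings are shared across CPUs. */
    struct toy_xdp_ring {
            spinlock_t tx_lock;
            /* descriptor state elided */
    };

    static int toy_xmit_locked(struct toy_xdp_ring *ring, void *frame,
                               bool shared)
    {
            int ret;

            if (shared)              /* e.g. num_cpus > num_xdp_queues */
                    spin_lock(&ring->tx_lock);
            ret = toy_place_frame(ring, frame);  /* invented helper */
            if (shared)
                    spin_unlock(&ring->tx_lock);
            return ret;
    }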
|
D | ice_main.c |
    2595  struct ice_tx_ring *xdp_ring;  in ice_xdp_alloc_setup_rings() local
    2597  xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);  in ice_xdp_alloc_setup_rings()
    2598  if (!xdp_ring)  in ice_xdp_alloc_setup_rings()
    2603  ice_free_tx_ring(xdp_ring);  in ice_xdp_alloc_setup_rings()
    2607  xdp_ring->ring_stats = ring_stats;  in ice_xdp_alloc_setup_rings()
    2608  xdp_ring->q_index = xdp_q_idx;  in ice_xdp_alloc_setup_rings()
    2609  xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];  in ice_xdp_alloc_setup_rings()
    2610  xdp_ring->vsi = vsi;  in ice_xdp_alloc_setup_rings()
    2611  xdp_ring->netdev = NULL;  in ice_xdp_alloc_setup_rings()
    2612  xdp_ring->dev = dev;  in ice_xdp_alloc_setup_rings()
    [all …]
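ice_xdp_alloc_setup_rings() builds one TX ring per XDP queue: a zeroed allocation, back-pointers, and the hardware register index taken from the VSI's queue map, with ice_free_tx_ring() (2603) as the unwind path. A sketch of that construct-or-unwind shape, types and helper invented:

    /* Per-queue ring construction with unwind on failure. */
    struct toy_vsi  { u16 *txq_map; };
    struct toy_ring { u16 q_index, reg_idx; struct toy_vsi *vsi;
                      struct net_device *netdev; };

    static struct toy_ring *toy_xdp_ring_create(struct toy_vsi *vsi, u16 q_idx)
    {
            struct toy_ring *ring = kzalloc(sizeof(*ring), GFP_KERNEL);

            if (!ring)
                    return NULL;

            ring->q_index = q_idx;
            ring->reg_idx = vsi->txq_map[q_idx]; /* hardware queue id */
            ring->vsi     = vsi;
            ring->netdev  = NULL;                /* XDP rings carry no netdev */

            if (toy_setup_desc_area(ring)) {     /* invented: DMA descriptor area */
                    kfree(ring);
                    return NULL;
            }
            return ring;
    }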
|
D | ice_txrx.h |
    334  struct ice_tx_ring *xdp_ring;  member
|
/linux-6.6.21/drivers/net/ethernet/intel/i40e/ |
D | i40e_xsk.c |
    205  struct i40e_ring *xdp_ring;  in i40e_run_xdp_zc() local
    225  xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];  in i40e_run_xdp_zc()
    226  result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);  in i40e_run_xdp_zc()
    537  static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,  in i40e_xmit_pkt() argument
    544  dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);  in i40e_xmit_pkt()
    545  xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);  in i40e_xmit_pkt()
    547  tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);  in i40e_xmit_pkt()
    554  static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,  in i40e_xmit_pkt_batch() argument
    557  u16 ntu = xdp_ring->next_to_use;  in i40e_xmit_pkt_batch()
    565  dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);  in i40e_xmit_pkt_batch()
    [all …]
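i40e_xmit_pkt() shows the zero-copy TX conversion: the AF_XDP descriptor's umem address becomes a DMA address via xsk_buff_raw_get_dma(), is synced for the device (the umem was DMA-mapped when the pool was registered, so nothing is mapped per packet), and next_to_use is bumped as the hardware descriptor is filled. A sketch around those two pool calls, with their signatures as I understand them in v6.6 and an invented descriptor-fill helper:

    /* One zero-copy transmit step; toy_fill_desc() is an invented
     * stand-in for I40E_TX_DESC() plus the field assignments.
     */
    static void toy_zc_xmit_one(struct xsk_buff_pool *pool,
                                struct xdp_desc *desc, struct toy_ring *ring)
    {
            dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc->addr);

            xsk_buff_raw_dma_sync_for_device(pool, dma, desc->len);
            toy_fill_desc(ring, ring->next_to_use++, dma, desc->len);
    }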
|
D | i40e_txrx.c |
    2298  struct i40e_ring *xdp_ring);
    2300  int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)  in i40e_xmit_xdp_tx_ring() argument
    2307  return i40e_xmit_xdp_ring(xdpf, xdp_ring);  in i40e_xmit_xdp_tx_ring()
    2319  struct i40e_ring *xdp_ring;  in i40e_run_xdp() local
    2332  xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];  in i40e_run_xdp()
    2333  result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);  in i40e_run_xdp()
    2364  void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)  in i40e_xdp_ring_update_tail() argument
    2370  writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);  in i40e_xdp_ring_update_tail()
    2408  struct i40e_ring *xdp_ring =  in i40e_finalize_xdp_rx() local
    2411  i40e_xdp_ring_update_tail(xdp_ring);  in i40e_finalize_xdp_rx()
    [all …]
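i40e_run_xdp() is the usual verdict dispatch: run the attached BPF program, then switch on the action, with XDP_TX going to the TX ring paired with this RX queue (2332-2333). A sketch of that dispatch using the real BPF helpers (needs <linux/filter.h> and <net/xdp.h>); toy_xmit() is an invented stand-in for i40e_xmit_xdp_tx_ring():

    static int toy_run_xdp(struct net_device *dev, struct bpf_prog *prog,
                           struct xdp_buff *xdp, struct toy_ring *xdp_ring)
    {
            u32 act = bpf_prog_run_xdp(prog, xdp);

            switch (act) {
            case XDP_PASS:
                    return 0;                      /* build an skb as usual */
            case XDP_TX:
                    return toy_xmit(xdp, xdp_ring);
            case XDP_REDIRECT:
                    return xdp_do_redirect(dev, xdp, prog);
            default:
                    bpf_warn_invalid_xdp_action(dev, prog, act);
                    fallthrough;
            case XDP_ABORTED:
            case XDP_DROP:
                    return -1;                     /* recycle the buffer */
            }
    }

i40e_finalize_xdp_rx() (2408-2411) then writes the tail doorbell once per poll, not once per packet.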
|
D | i40e_txrx_common.h |
    7   int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
    12  void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
|
D | i40e_debugfs.c |
    330  struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);  in i40e_dbg_dump_vsi_seid() local
    332  if (!xdp_ring)  in i40e_dbg_dump_vsi_seid()
    337  i, *xdp_ring->state,  in i40e_dbg_dump_vsi_seid()
    338  xdp_ring->queue_index,  in i40e_dbg_dump_vsi_seid()
    339  xdp_ring->reg_idx);  in i40e_dbg_dump_vsi_seid()
    343  xdp_ring->next_to_use,  in i40e_dbg_dump_vsi_seid()
    344  xdp_ring->next_to_clean,  in i40e_dbg_dump_vsi_seid()
    345  xdp_ring->ring_active);  in i40e_dbg_dump_vsi_seid()
    348  i, xdp_ring->stats.packets,  in i40e_dbg_dump_vsi_seid()
    349  xdp_ring->stats.bytes,  in i40e_dbg_dump_vsi_seid()
    [all …]
|
/linux-6.6.21/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_xsk.c |
    396  static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)  in ixgbe_xmit_zc() argument
    398  struct xsk_buff_pool *pool = xdp_ring->xsk_pool;  in ixgbe_xmit_zc()
    407  if (unlikely(!ixgbe_desc_unused(xdp_ring))) {  in ixgbe_xmit_zc()
    412  if (!netif_carrier_ok(xdp_ring->netdev))  in ixgbe_xmit_zc()
    421  tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];  in ixgbe_xmit_zc()
    426  tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);  in ixgbe_xmit_zc()
    438  xdp_ring->next_to_use++;  in ixgbe_xmit_zc()
    439  if (xdp_ring->next_to_use == xdp_ring->count)  in ixgbe_xmit_zc()
    440  xdp_ring->next_to_use = 0;  in ixgbe_xmit_zc()
    444  ixgbe_xdp_ring_update_tail(xdp_ring);  in ixgbe_xmit_zc()
    [all …]
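ixgbe_xmit_zc() drains up to a budget of descriptors from the xsk pool, stops early when the ring is full or the link is down, wraps next_to_use manually (438-440), and rings the doorbell once at the end. A sketch of that loop using the xsk pool API as I understand it (xsk_tx_peek_desc()/xsk_tx_release()); the ring helpers are invented:

    static bool toy_xmit_zc(struct toy_ring *ring, unsigned int budget)
    {
            struct xsk_buff_pool *pool = ring->xsk_pool;
            struct xdp_desc desc;
            bool work_done = true;

            while (budget-- > 0) {
                    if (!toy_desc_unused(ring) ||
                        !netif_carrier_ok(ring->netdev)) {
                            work_done = false;     /* retry next poll */
                            break;
                    }
                    if (!xsk_tx_peek_desc(pool, &desc))
                            break;                 /* userspace queue empty */

                    toy_fill_desc(ring, &desc);    /* invented descriptor fill */
                    if (++ring->next_to_use == ring->count)
                            ring->next_to_use = 0; /* manual wraparound */
            }
            xsk_tx_release(pool);                  /* publish consumed entries */
            toy_ring_doorbell(ring);
            return work_done;
    }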
|
D | ixgbe_main.c |
    619   ring = adapter->xdp_ring[n];  in ixgbe_dump()
    958   &adapter->xdp_ring[i]->state);  in ixgbe_update_xoff_rx_lfc()
    1005  struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];  in ixgbe_update_xoff_received() local
    1007  tc = xdp_ring->dcb_tc;  in ixgbe_update_xoff_received()
    1009  clear_bit(__IXGBE_HANG_CHECK_ARMED, &xdp_ring->state);  in ixgbe_update_xoff_received()
    3629  ixgbe_configure_tx_ring(adapter, adapter->xdp_ring[i]);  in ixgbe_configure_tx()
    5887  struct ixgbe_ring *ring = adapter->xdp_ring[i];  in ixgbe_disable_tx()
    5934  struct ixgbe_ring *ring = adapter->xdp_ring[i];  in ixgbe_disable_tx()
    6121  ixgbe_clean_tx_ring(adapter->xdp_ring[i]);  in ixgbe_clean_all_tx_rings()
    6162  if (adapter->xdp_ring[0])  in ixgbe_down()
    [all …]
|
D | ixgbe_lib.c |
    264   adapter->xdp_ring[i]->reg_idx = reg_idx;  in ixgbe_cache_ring_rss()
    955   WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);  in ixgbe_alloc_q_vector()
    1027  WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);  in ixgbe_free_q_vector()
    1113  if (adapter->xdp_ring[i])  in ixgbe_alloc_q_vectors()
    1114  adapter->xdp_ring[i]->ring_idx = i;  in ixgbe_alloc_q_vectors()
|
D | ixgbe.h |
    682  struct ixgbe_ring *xdp_ring[IXGBE_MAX_XDP_QS];  member
    839  return adapter->xdp_ring[index];  in ixgbe_determine_xdp_ring()
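ixgbe_determine_xdp_ring() (839) maps the calling context to one of the adapter's XDP TX rings. My reading of the common pattern, not necessarily ixgbe's exact test: when there are fewer rings than CPUs, the index is derived from smp_processor_id() modulo the ring count, and a shared ring then needs a lock on use. A sketch with invented adapter fields:

    /* Per-CPU XDP ring selection; whether locking is needed follows
     * from whether CPUs outnumber rings. Names are illustrative.
     */
    static struct toy_ring *toy_determine_xdp_ring(struct toy_adapter *ad)
    {
            unsigned int index = smp_processor_id();

            if (ad->num_xdp_queues < num_possible_cpus())
                    index %= ad->num_xdp_queues; /* shared: lock on use */
            return ad->xdp_ring[index];
    }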
|
/linux-6.6.21/drivers/net/ethernet/amazon/ena/ |
D | ena_netdev.c |
    60   static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
    196  struct ena_ring *xdp_ring;  in ena_xdp_io_poll() local
    200  xdp_ring = ena_napi->xdp_ring;  in ena_xdp_io_poll()
    204  if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||  in ena_xdp_io_poll()
    205  test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {  in ena_xdp_io_poll()
    210  xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);  in ena_xdp_io_poll()
    215  if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {  in ena_xdp_io_poll()
    221  ena_unmask_interrupt(xdp_ring, NULL);  in ena_xdp_io_poll()
    222  ena_update_ring_numa_node(xdp_ring, NULL);  in ena_xdp_io_poll()
    228  u64_stats_update_begin(&xdp_ring->syncp);  in ena_xdp_io_poll()
    [all …]
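ena_xdp_io_poll() is a dedicated NAPI handler for an XDP TX queue: bail out if the device is going down, clean up to the budget, and only re-arm the interrupt after the NAPI instance is marked complete. A skeleton of that flow with invented helpers:

    /* XDP TX NAPI poll skeleton; toy_clean()/toy_unmask_irq() invented. */
    struct toy_ring {
            struct napi_struct napi;
            unsigned long flags;
    };
    #define TOY_DEV_UP 0

    static int toy_xdp_poll(struct napi_struct *napi, int budget)
    {
            struct toy_ring *ring = container_of(napi, struct toy_ring, napi);
            int done;

            if (!test_bit(TOY_DEV_UP, &ring->flags))
                    return 0;                 /* device being torn down */

            done = toy_clean(ring, budget);
            if (done < budget && napi_complete_done(napi, done))
                    toy_unmask_irq(ring);     /* re-arm only when idle */
            return done;
    }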
|
D | ena_netdev.h |
    141  struct ena_ring *xdp_ring;  member
    254  struct ena_ring *xdp_ring;  member
|
/linux-6.6.21/net/xdp/ |
D | xsk_queue.h |
    16  struct xdp_ring {  struct
    30  struct xdp_ring ptrs;  argument
    36  struct xdp_ring ptrs;
    45  struct xdp_ring *ring;
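This struct xdp_ring is the shared header of every AF_XDP ring: a producer index and a consumer index that kernel and userspace advance independently, padded onto separate cache lines in the real struct to avoid false sharing, with entries addressed modulo a power-of-two size. A compilable sketch of the single-producer/single-consumer arithmetic (a real kernel/user shared ring also needs acquire/release ordering on the counters):

    #include <stdint.h>

    struct toy_xdp_ring {
            uint32_t producer;   /* written by the producing side only */
            uint32_t consumer;   /* written by the consuming side only */
            uint32_t mask;       /* size - 1, size is a power of two */
            uint64_t ring[];     /* descriptors / addresses */
    };

    static int toy_produce(struct toy_xdp_ring *r, uint64_t val)
    {
            if (r->producer - r->consumer > r->mask)
                    return -1;                       /* full */
            r->ring[r->producer++ & r->mask] = val;
            return 0;
    }

    static int toy_consume(struct toy_xdp_ring *r, uint64_t *val)
    {
            if (r->consumer == r->producer)
                    return -1;                       /* empty */
            *val = r->ring[r->consumer++ & r->mask];
            return 0;
    }

The counters are free-running; masking on access handles wraparound, and the full test works across counter overflow because the subtraction is unsigned.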
|
/linux-6.6.21/drivers/net/ethernet/netronome/nfp/nfd3/ |
D | xsk.c |
    126  tx_ring = r_vec->xdp_ring;  in nfp_nfd3_xsk_rx()
    399  if (!nfp_nfd3_xsk_complete(r_vec->xdp_ring))  in nfp_nfd3_xsk_poll()
    402  nfp_nfd3_xsk_tx(r_vec->xdp_ring);  in nfp_nfd3_xsk_poll()
|
/linux-6.6.21/drivers/net/ |
D | veth.c |
    67    struct ptr_ring xdp_ring;  member
    308   if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {  in veth_xdp_rx()
    501   spin_lock(&rq->xdp_ring.producer_lock);  in veth_xdp_xmit()
    507   __ptr_ring_produce(&rq->xdp_ring, ptr)))  in veth_xdp_xmit()
    511   spin_unlock(&rq->xdp_ring.producer_lock);  in veth_xdp_xmit()
    942   void *ptr = __ptr_ring_consume(&rq->xdp_ring);  in veth_xdp_rcv()
    1011  if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {  in veth_poll()
    1060  err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);  in __veth_napi_enable_range()
    1076  ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);  in __veth_napi_enable_range()
    1110  ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);  in veth_napi_del_range()
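Unlike the hardware drivers above, veth backs each RX queue with a software ptr_ring: the peer produces skb/frame pointers (taking producer_lock explicitly when batching, 501-511), the NAPI consumer drains with __ptr_ring_consume() (942), and cleanup passes a destructor for leftover entries. A hedged usage sketch of that ptr_ring API; ring size and destructor are illustrative:

    #include <linux/ptr_ring.h>

    #define TOY_RING_SIZE 256

    static void toy_ptr_free(void *ptr)
    {
            kfree(ptr);              /* veth frees skbs/xdp frames here */
    }

    static int toy_queue_setup(struct ptr_ring *ring)
    {
            return ptr_ring_init(ring, TOY_RING_SIZE, GFP_KERNEL);
    }

    static int toy_enqueue(struct ptr_ring *ring, void *pkt)
    {
            return ptr_ring_produce(ring, pkt);  /* -ENOSPC when full */
    }

    static void toy_queue_teardown(struct ptr_ring *ring)
    {
            ptr_ring_cleanup(ring, toy_ptr_free);
    }

The __ptr_ring_* variants skip internal locking, which is why veth wraps the batched produce in an explicit producer_lock while the sole NAPI consumer can use them bare.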
|
/linux-6.6.21/drivers/net/ethernet/intel/ixgbevf/ |
D | ixgbevf_main.c |
    1053  struct ixgbevf_ring *xdp_ring;  in ixgbevf_run_xdp() local
    1067  xdp_ring = adapter->xdp_ring[rx_ring->queue_index];  in ixgbevf_run_xdp()
    1068  result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);  in ixgbevf_run_xdp()
    1242  struct ixgbevf_ring *xdp_ring =  in ixgbevf_clean_rx_irq() local
    1243  adapter->xdp_ring[rx_ring->queue_index];  in ixgbevf_clean_rx_irq()
    1249  ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);  in ixgbevf_clean_rx_irq()
    1762  ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);  in ixgbevf_configure_tx()
    2487  ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);  in ixgbevf_clean_all_tx_rings()
    2527  u8 reg_idx = adapter->xdp_ring[i]->reg_idx;  in ixgbevf_down()
    2799  adapter->xdp_ring[xdp_idx] = ring;  in ixgbevf_alloc_q_vector()
    [all …]
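The hits at 1242-1249 show doorbell batching: ixgbevf writes the XDP TX tail once per RX poll rather than once per XDP_TX packet. The common shape is to OR per-packet results into a status mask during the loop and write the MMIO tail afterwards; a sketch with invented flag values and helpers:

    #define TOY_XDP_TX 0x2

    static void toy_rx_poll(struct toy_rx_ring *rx, struct toy_ring *xdp_ring)
    {
            unsigned int xdp_xmit = 0;

            while (toy_more_work(rx))
                    xdp_xmit |= toy_run_xdp(rx); /* may queue on xdp_ring */

            if (xdp_xmit & TOY_XDP_TX)           /* one doorbell per poll */
                    toy_write_tail(xdp_ring, xdp_ring->next_to_use);
    }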
|
D | ethtool.c |
    269  adapter->xdp_ring[i]->count = new_tx_count;  in ixgbevf_set_ringparam()
    307  tx_ring[i] = *adapter->xdp_ring[j];  in ixgbevf_set_ringparam()
    368  ixgbevf_free_tx_resources(adapter->xdp_ring[j]);  in ixgbevf_set_ringparam()
    369  *adapter->xdp_ring[j] = tx_ring[i];  in ixgbevf_set_ringparam()
    470  ring = adapter->xdp_ring[j];  in ixgbevf_get_ethtool_stats()
|
D | ixgbevf.h |
    336  struct ixgbevf_ring *xdp_ring[MAX_XDP_QUEUES];  member
|
/linux-6.6.21/drivers/net/ethernet/netronome/nfp/ |
D | nfp_net_debugfs.c |
    92  tx_ring = r_vec->xdp_ring;  in nfp_tx_q_show()
|
D | nfp_net.h |
    419  struct nfp_net_tx_ring *xdp_ring;  member
|