
Searched refs:tx_ring (Results 1 – 25 of 314) sorted by relevance


/linux-5.19.10/drivers/net/ethernet/netronome/nfp/nfd3/
rings.c
11 static void nfp_nfd3_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring) in nfp_nfd3_xsk_tx_bufs_free() argument
16 while (tx_ring->rd_p != tx_ring->wr_p) { in nfp_nfd3_xsk_tx_bufs_free()
17 idx = D_IDX(tx_ring, tx_ring->rd_p); in nfp_nfd3_xsk_tx_bufs_free()
18 txbuf = &tx_ring->txbufs[idx]; in nfp_nfd3_xsk_tx_bufs_free()
22 tx_ring->qcp_rd_p++; in nfp_nfd3_xsk_tx_bufs_free()
23 tx_ring->rd_p++; in nfp_nfd3_xsk_tx_bufs_free()
25 if (tx_ring->r_vec->xsk_pool) { in nfp_nfd3_xsk_tx_bufs_free()
29 xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1); in nfp_nfd3_xsk_tx_bufs_free()
42 nfp_nfd3_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) in nfp_nfd3_tx_ring_reset() argument
47 while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) { in nfp_nfd3_tx_ring_reset()
[all …]
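The reset loop above drains the ring by walking the free-running rd_p counter forward until it meets wr_p, masking each value down to a slot index via D_IDX(). A minimal standalone sketch of that pattern, assuming a power-of-two ring; RING_CNT and RING_IDX are illustrative names, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_CNT 8u                        /* ring size, power of two */
    #define RING_IDX(p) ((p) & (RING_CNT - 1)) /* counter -> slot index */

    int main(void)
    {
        uint32_t rd_p = 6, wr_p = 10; /* counters run freely past RING_CNT */

        while (rd_p != wr_p) {        /* drain until the ring is empty */
            printf("free slot %u (rd_p=%u)\n", RING_IDX(rd_p), rd_p);
            rd_p++;
        }
        return 0;
    }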
xsk.c
17 struct nfp_net_tx_ring *tx_ring, in nfp_nfd3_xsk_tx_xdp() argument
26 if (nfp_net_tx_space(tx_ring) < 1) in nfp_nfd3_xsk_tx_xdp()
32 wr_idx = D_IDX(tx_ring, tx_ring->wr_p); in nfp_nfd3_xsk_tx_xdp()
34 txbuf = &tx_ring->txbufs[wr_idx]; in nfp_nfd3_xsk_tx_xdp()
40 txd = &tx_ring->txds[wr_idx]; in nfp_nfd3_xsk_tx_xdp()
50 tx_ring->wr_ptr_add++; in nfp_nfd3_xsk_tx_xdp()
51 tx_ring->wr_p++; in nfp_nfd3_xsk_tx_xdp()
117 struct nfp_net_tx_ring *tx_ring; in nfp_nfd3_xsk_rx() local
123 tx_ring = r_vec->xdp_ring; in nfp_nfd3_xsk_rx()
227 if (!nfp_nfd3_xsk_tx_xdp(dp, r_vec, rx_ring, tx_ring, in nfp_nfd3_xsk_rx()
[all …]
dp.c
29 static int nfp_nfd3_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring) in nfp_nfd3_tx_ring_should_wake() argument
31 return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4); in nfp_nfd3_tx_ring_should_wake()
34 static int nfp_nfd3_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring) in nfp_nfd3_tx_ring_should_stop() argument
36 return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1); in nfp_nfd3_tx_ring_should_stop()
50 struct nfp_net_tx_ring *tx_ring) in nfp_nfd3_tx_ring_stop() argument
56 if (unlikely(nfp_nfd3_tx_ring_should_wake(tx_ring))) in nfp_nfd3_tx_ring_stop()
224 struct nfp_net_tx_ring *tx_ring; in nfp_nfd3_tx() local
238 tx_ring = &dp->tx_rings[qidx]; in nfp_nfd3_tx()
239 r_vec = tx_ring->r_vec; in nfp_nfd3_tx()
243 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) { in nfp_nfd3_tx()
[all …]
/linux-5.19.10/drivers/net/ethernet/netronome/nfp/nfdk/
rings.c
11 nfp_nfdk_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) in nfp_nfdk_tx_ring_reset() argument
16 while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) { in nfp_nfdk_tx_ring_reset()
23 rd_idx = D_IDX(tx_ring, tx_ring->rd_p); in nfp_nfdk_tx_ring_reset()
24 txbuf = &tx_ring->ktxbufs[rd_idx]; in nfp_nfdk_tx_ring_reset()
28 n_descs = D_BLOCK_CPL(tx_ring->rd_p); in nfp_nfdk_tx_ring_reset()
57 tx_ring->rd_p += n_descs; in nfp_nfdk_tx_ring_reset()
60 memset(tx_ring->txds, 0, tx_ring->size); in nfp_nfdk_tx_ring_reset()
61 tx_ring->data_pending = 0; in nfp_nfdk_tx_ring_reset()
62 tx_ring->wr_p = 0; in nfp_nfdk_tx_ring_reset()
63 tx_ring->rd_p = 0; in nfp_nfdk_tx_ring_reset()
[all …]
dp.c
17 static int nfp_nfdk_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring) in nfp_nfdk_tx_ring_should_wake() argument
19 return !nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT * 2); in nfp_nfdk_tx_ring_should_wake()
22 static int nfp_nfdk_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring) in nfp_nfdk_tx_ring_should_stop() argument
24 return nfp_net_tx_full(tx_ring, NFDK_TX_DESC_STOP_CNT); in nfp_nfdk_tx_ring_should_stop()
28 struct nfp_net_tx_ring *tx_ring) in nfp_nfdk_tx_ring_stop() argument
34 if (unlikely(nfp_nfdk_tx_ring_should_wake(tx_ring))) in nfp_nfdk_tx_ring_stop()
127 nfp_nfdk_tx_maybe_close_block(struct nfp_net_tx_ring *tx_ring, in nfp_nfdk_tx_maybe_close_block() argument
159 if (round_down(tx_ring->wr_p, NFDK_TX_DESC_BLOCK_CNT) != in nfp_nfdk_tx_maybe_close_block()
160 round_down(tx_ring->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT)) in nfp_nfdk_tx_maybe_close_block()
163 if ((u32)tx_ring->data_pending + skb->len > NFDK_TX_MAX_DATA_PER_BLOCK) in nfp_nfdk_tx_maybe_close_block()
[all …]
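The round_down() comparison in nfp_nfdk_tx_maybe_close_block() above detects a submission that would straddle a descriptor-block boundary: the block index differs before and after adding n_descs. A minimal sketch of the same test; DESC_BLOCK_CNT is an illustrative constant:

    #include <stdbool.h>
    #include <stdint.h>

    #define DESC_BLOCK_CNT 32u                 /* descriptors per block (assumed) */
    #define ROUND_DOWN(x, y) ((x) / (y) * (y)) /* like the kernel's round_down() */

    static bool crosses_block(uint32_t wr_p, uint32_t n_descs)
    {
        /* true when wr_p and wr_p + n_descs land in different blocks */
        return ROUND_DOWN(wr_p, DESC_BLOCK_CNT) !=
               ROUND_DOWN(wr_p + n_descs, DESC_BLOCK_CNT);
    }

For example, crosses_block(30, 4) is true: descriptors 30..33 span two 32-descriptor blocks.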
/linux-5.19.10/drivers/net/ethernet/netronome/nfp/
nfp_net_dp.h
51 static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt) in nfp_net_tx_full() argument
53 return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt); in nfp_net_tx_full()
56 static inline void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring) in nfp_net_tx_xmit_more_flush() argument
59 nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add); in nfp_net_tx_xmit_more_flush()
60 tx_ring->wr_ptr_add = 0; in nfp_net_tx_xmit_more_flush()
64 nfp_net_read_tx_cmpl(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp) in nfp_net_read_tx_cmpl() argument
66 if (tx_ring->txrwb) in nfp_net_read_tx_cmpl()
67 return *tx_ring->txrwb; in nfp_net_read_tx_cmpl()
68 return nfp_qcp_rd_ptr_read(tx_ring->qcp_q); in nfp_net_read_tx_cmpl()
100 struct nfp_net_tx_ring *tx_ring, unsigned int idx);
[all …]
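nfp_net_tx_full() above leans on unsigned wrap-around: subtracting the free-running read pointer from the write pointer gives the occupancy even after the counters wrap, and the ring counts as full once fewer than dcnt descriptors remain. A standalone sketch (the u32 counter width is an assumption):

    #include <assert.h>
    #include <stdint.h>

    static int tx_full(uint32_t wr_p, uint32_t rd_p, uint32_t cnt, uint32_t dcnt)
    {
        return (wr_p - rd_p) >= (cnt - dcnt); /* occupancy vs. threshold */
    }

    int main(void)
    {
        assert(!tx_full(10, 8, 8, 4));            /* occupancy 2 < 4 */
        assert(tx_full(3, UINT32_MAX - 1, 8, 4)); /* wrapped: occupancy 5 */
        return 0;
    }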
nfp_net_debugfs.c
83 struct nfp_net_tx_ring *tx_ring; in nfp_tx_q_show() local
90 tx_ring = r_vec->tx_ring; in nfp_tx_q_show()
92 tx_ring = r_vec->xdp_ring; in nfp_tx_q_show()
93 if (!r_vec->nfp_net || !tx_ring) in nfp_tx_q_show()
99 d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); in nfp_tx_q_show()
100 d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q); in nfp_tx_q_show()
103 tx_ring->idx, tx_ring->qcidx, in nfp_tx_q_show()
104 tx_ring == r_vec->tx_ring ? "" : "xdp", in nfp_tx_q_show()
105 tx_ring->cnt, &tx_ring->dma, tx_ring->txds, in nfp_tx_q_show()
106 tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p); in nfp_tx_q_show()
[all …]
/linux-5.19.10/drivers/net/ethernet/intel/ice/
ice_txrx.c
39 struct ice_tx_ring *tx_ring; in ice_prgm_fdir_fltr() local
48 tx_ring = vsi->tx_rings[0]; in ice_prgm_fdir_fltr()
49 if (!tx_ring || !tx_ring->desc) in ice_prgm_fdir_fltr()
51 dev = tx_ring->dev; in ice_prgm_fdir_fltr()
54 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) { in ice_prgm_fdir_fltr()
67 i = tx_ring->next_to_use; in ice_prgm_fdir_fltr()
68 first = &tx_ring->tx_buf[i]; in ice_prgm_fdir_fltr()
69 f_desc = ICE_TX_FDIRDESC(tx_ring, i); in ice_prgm_fdir_fltr()
73 i = (i < tx_ring->count) ? i : 0; in ice_prgm_fdir_fltr()
74 tx_desc = ICE_TX_DESC(tx_ring, i); in ice_prgm_fdir_fltr()
[all …]
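The wrap in ice_prgm_fdir_fltr() above ("i = (i < tx_ring->count) ? i : 0;") folds the slot index back to 0 at the ring count and, unlike mask-based schemes, works for any ring size. A one-function sketch:

    /* Advance a ring index by one, wrapping at count (any count, not
     * just powers of two). Illustrative helper, not a driver API. */
    static unsigned int ring_next(unsigned int i, unsigned int count)
    {
        i++;
        return (i < count) ? i : 0;
    }

e.g. ring_next(254, 256) == 255 and ring_next(255, 256) == 0.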
/linux-5.19.10/drivers/net/ethernet/intel/iavf/
iavf_txrx.c
57 void iavf_clean_tx_ring(struct iavf_ring *tx_ring) in iavf_clean_tx_ring() argument
63 if (!tx_ring->tx_bi) in iavf_clean_tx_ring()
67 for (i = 0; i < tx_ring->count; i++) in iavf_clean_tx_ring()
68 iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); in iavf_clean_tx_ring()
70 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count; in iavf_clean_tx_ring()
71 memset(tx_ring->tx_bi, 0, bi_size); in iavf_clean_tx_ring()
74 memset(tx_ring->desc, 0, tx_ring->size); in iavf_clean_tx_ring()
76 tx_ring->next_to_use = 0; in iavf_clean_tx_ring()
77 tx_ring->next_to_clean = 0; in iavf_clean_tx_ring()
79 if (!tx_ring->netdev) in iavf_clean_tx_ring()
[all …]
/linux-5.19.10/drivers/infiniband/hw/hfi1/
ipoib_tx.c
49 return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs, in hfi1_ipoib_used()
50 txq->tx_ring.complete_txreqs); in hfi1_ipoib_used()
56 if (atomic_inc_return(&txq->tx_ring.stops) == 1) in hfi1_ipoib_stop_txq()
63 if (atomic_dec_and_test(&txq->tx_ring.stops)) in hfi1_ipoib_wake_txq()
70 txq->tx_ring.max_items - 1); in hfi1_ipoib_ring_hwat()
76 txq->tx_ring.max_items) >> 1; in hfi1_ipoib_ring_lwat()
81 ++txq->tx_ring.sent_txreqs; in hfi1_ipoib_check_queue_depth()
83 !atomic_xchg(&txq->tx_ring.ring_full, 1)) { in hfi1_ipoib_check_queue_depth()
108 atomic_xchg(&txq->tx_ring.ring_full, 0)) { in hfi1_ipoib_check_queue_stopped()
136 struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; in hfi1_ipoib_drain_tx_ring() local
[all …]
/linux-5.19.10/drivers/net/ethernet/intel/fm10k/
fm10k_main.c
735 static int fm10k_tso(struct fm10k_ring *tx_ring, in fm10k_tso() argument
768 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); in fm10k_tso()
775 tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; in fm10k_tso()
777 netdev_err(tx_ring->netdev, in fm10k_tso()
782 static void fm10k_tx_csum(struct fm10k_ring *tx_ring, in fm10k_tx_csum() argument
804 dev_warn(tx_ring->dev, in fm10k_tx_csum()
806 tx_ring->tx_stats.csum_err++; in fm10k_tx_csum()
847 dev_warn(tx_ring->dev, in fm10k_tx_csum()
852 tx_ring->tx_stats.csum_err++; in fm10k_tx_csum()
858 tx_ring->tx_stats.csum_good++; in fm10k_tx_csum()
[all …]
/linux-5.19.10/drivers/net/ethernet/intel/i40e/
i40e_txrx_common.h
44 static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring, in i40e_update_tx_stats() argument
48 u64_stats_update_begin(&tx_ring->syncp); in i40e_update_tx_stats()
49 tx_ring->stats.bytes += total_bytes; in i40e_update_tx_stats()
50 tx_ring->stats.packets += total_packets; in i40e_update_tx_stats()
51 u64_stats_update_end(&tx_ring->syncp); in i40e_update_tx_stats()
52 tx_ring->q_vector->tx.total_bytes += total_bytes; in i40e_update_tx_stats()
53 tx_ring->q_vector->tx.total_packets += total_packets; in i40e_update_tx_stats()
64 static inline void i40e_arm_wb(struct i40e_ring *tx_ring, in i40e_arm_wb() argument
68 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { in i40e_arm_wb()
74 unsigned int j = i40e_get_tx_pending(tx_ring, false); in i40e_arm_wb()
[all …]
i40e_txrx.c
22 static void i40e_fdir(struct i40e_ring *tx_ring, in i40e_fdir() argument
26 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_fdir()
31 i = tx_ring->next_to_use; in i40e_fdir()
32 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); in i40e_fdir()
35 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_fdir()
92 struct i40e_ring *tx_ring; in i40e_program_fdir_filter() local
104 tx_ring = vsi->tx_rings[0]; in i40e_program_fdir_filter()
105 dev = tx_ring->dev; in i40e_program_fdir_filter()
108 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) { in i40e_program_fdir_filter()
120 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
[all …]
i40e_xsk.c
515 static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring, in i40e_clean_xdp_tx_buffer() argument
519 tx_ring->xdp_tx_active--; in i40e_clean_xdp_tx_buffer()
520 dma_unmap_single(tx_ring->dev, in i40e_clean_xdp_tx_buffer()
533 bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring) in i40e_clean_xdp_tx_irq() argument
535 struct xsk_buff_pool *bp = tx_ring->xsk_pool; in i40e_clean_xdp_tx_irq()
537 u32 head_idx = i40e_get_head(tx_ring); in i40e_clean_xdp_tx_irq()
541 if (head_idx < tx_ring->next_to_clean) in i40e_clean_xdp_tx_irq()
542 head_idx += tx_ring->count; in i40e_clean_xdp_tx_irq()
543 completed_frames = head_idx - tx_ring->next_to_clean; in i40e_clean_xdp_tx_irq()
548 if (likely(!tx_ring->xdp_tx_active)) { in i40e_clean_xdp_tx_irq()
[all …]
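i40e_clean_xdp_tx_irq() above derives the number of completed frames from a hardware head index that may already have wrapped below next_to_clean; one ring length is added back before taking the difference. A minimal sketch:

    #include <stdint.h>

    /* Illustrative helper mirroring the excerpt's arithmetic. */
    static uint32_t completed_frames(uint32_t head_idx, uint32_t next_to_clean,
                                     uint32_t count)
    {
        if (head_idx < next_to_clean)
            head_idx += count; /* head wrapped past the end of the ring */
        return head_idx - next_to_clean;
    }

e.g. with count = 512, head_idx = 3, next_to_clean = 510, this yields 5.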
/linux-5.19.10/drivers/net/ethernet/freescale/enetc/
enetc.c
27 struct enetc_bdr *tx_ring) in enetc_rx_ring_from_xdp_tx_ring() argument
29 int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring; in enetc_rx_ring_from_xdp_tx_ring()
51 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring, in enetc_unmap_tx_buff() argument
59 dma_unmap_page(tx_ring->dev, tx_swbd->dma, in enetc_unmap_tx_buff()
63 dma_unmap_single(tx_ring->dev, tx_swbd->dma, in enetc_unmap_tx_buff()
68 static void enetc_free_tx_frame(struct enetc_bdr *tx_ring, in enetc_free_tx_frame() argument
75 enetc_unmap_tx_buff(tx_ring, tx_swbd); in enetc_free_tx_frame()
87 static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring) in enetc_update_tx_ring_tail() argument
90 enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use); in enetc_update_tx_ring_tail()
126 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) in enetc_map_tx_buffs() argument
[all …]
/linux-5.19.10/drivers/net/ethernet/intel/igbvf/
netdev.c
418 struct igbvf_ring *tx_ring) in igbvf_setup_tx_resources() argument
423 size = sizeof(struct igbvf_buffer) * tx_ring->count; in igbvf_setup_tx_resources()
424 tx_ring->buffer_info = vzalloc(size); in igbvf_setup_tx_resources()
425 if (!tx_ring->buffer_info) in igbvf_setup_tx_resources()
429 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); in igbvf_setup_tx_resources()
430 tx_ring->size = ALIGN(tx_ring->size, 4096); in igbvf_setup_tx_resources()
432 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, in igbvf_setup_tx_resources()
433 &tx_ring->dma, GFP_KERNEL); in igbvf_setup_tx_resources()
434 if (!tx_ring->desc) in igbvf_setup_tx_resources()
437 tx_ring->adapter = adapter; in igbvf_setup_tx_resources()
[all …]
/linux-5.19.10/drivers/net/can/spi/mcp251xfd/
mcp251xfd-tx.c
21 mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring) in mcp251xfd_get_tx_obj_next() argument
25 tx_head = mcp251xfd_get_tx_head(tx_ring); in mcp251xfd_get_tx_obj_next()
27 return &tx_ring->obj[tx_head]; in mcp251xfd_get_tx_obj_next()
141 struct mcp251xfd_tx_ring *tx_ring) in mcp251xfd_tx_busy() argument
143 if (mcp251xfd_get_tx_free(tx_ring) > 0) in mcp251xfd_tx_busy()
151 if (mcp251xfd_get_tx_free(tx_ring) == 0) { in mcp251xfd_tx_busy()
154 tx_ring->head, tx_ring->tail, in mcp251xfd_tx_busy()
155 tx_ring->head - tx_ring->tail); in mcp251xfd_tx_busy()
169 struct mcp251xfd_tx_ring *tx_ring = priv->tx; in mcp251xfd_start_xmit() local
178 if (mcp251xfd_tx_busy(priv, tx_ring)) in mcp251xfd_start_xmit()
[all …]
mcp251xfd-tef.c
61 const struct mcp251xfd_tx_ring *tx_ring = priv->tx; in mcp251xfd_handle_tefif_recover() local
80 seq, priv->tef->tail, priv->tef->head, tx_ring->head); in mcp251xfd_handle_tefif_recover()
125 const struct mcp251xfd_tx_ring *tx_ring = priv->tx; in mcp251xfd_tef_ring_update() local
137 new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail; in mcp251xfd_tef_ring_update()
139 new_head += tx_ring->obj_num; in mcp251xfd_tef_ring_update()
142 priv->tef->head = min(new_head, tx_ring->head); in mcp251xfd_tef_ring_update()
152 const struct mcp251xfd_tx_ring *tx_ring = priv->tx; in mcp251xfd_tef_obj_read() local
156 (offset > tx_ring->obj_num || in mcp251xfd_tef_obj_read()
157 len > tx_ring->obj_num || in mcp251xfd_tef_obj_read()
158 offset + len > tx_ring->obj_num)) { in mcp251xfd_tef_obj_read()
[all …]
/linux-5.19.10/drivers/net/ethernet/qlogic/qlcnic/
qlcnic_io.c
271 u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring) in qlcnic_82xx_change_filter() argument
280 producer = tx_ring->producer; in qlcnic_82xx_change_filter()
281 hwdesc = &tx_ring->desc_head[tx_ring->producer]; in qlcnic_82xx_change_filter()
297 tx_ring->producer = get_next_index(producer, tx_ring->num_desc); in qlcnic_82xx_change_filter()
304 struct qlcnic_host_tx_ring *tx_ring) in qlcnic_send_filter() argument
338 vlan_id, tx_ring); in qlcnic_send_filter()
353 qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring); in qlcnic_send_filter()
373 struct qlcnic_host_tx_ring *tx_ring) in qlcnic_tx_encap_pkt() argument
377 u32 producer = tx_ring->producer; in qlcnic_tx_encap_pkt()
407 hwdesc = &tx_ring->desc_head[producer]; in qlcnic_tx_encap_pkt()
[all …]
/linux-5.19.10/drivers/net/ethernet/intel/igc/
igc_xdp.c
43 struct igc_ring *rx_ring, *tx_ring; in igc_xdp_enable_pool() local
72 tx_ring = adapter->tx_ring[queue_id]; in igc_xdp_enable_pool()
78 igc_disable_tx_ring(tx_ring); in igc_xdp_enable_pool()
83 set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); in igc_xdp_enable_pool()
88 igc_enable_tx_ring(tx_ring); in igc_xdp_enable_pool()
102 struct igc_ring *rx_ring, *tx_ring; in igc_xdp_disable_pool() local
118 tx_ring = adapter->tx_ring[queue_id]; in igc_xdp_disable_pool()
124 igc_disable_tx_ring(tx_ring); in igc_xdp_disable_pool()
130 clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags); in igc_xdp_disable_pool()
135 igc_enable_tx_ring(tx_ring); in igc_xdp_disable_pool()
igc_dump.c
118 struct igc_ring *tx_ring; in igc_rings_dump() local
138 tx_ring = adapter->tx_ring[n]; in igc_rings_dump()
139 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; in igc_rings_dump()
142 n, tx_ring->next_to_use, tx_ring->next_to_clean, in igc_rings_dump()
167 tx_ring = adapter->tx_ring[n]; in igc_rings_dump()
170 tx_ring->queue_index); in igc_rings_dump()
174 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { in igc_rings_dump()
178 tx_desc = IGC_TX_DESC(tx_ring, i); in igc_rings_dump()
179 buffer_info = &tx_ring->tx_buffer_info[i]; in igc_rings_dump()
181 if (i == tx_ring->next_to_use && in igc_rings_dump()
[all …]
/linux-5.19.10/drivers/net/ethernet/mscc/
ocelot_fdma.c
70 struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring; in ocelot_fdma_tx_ring_free() local
72 if (tx_ring->next_to_use >= tx_ring->next_to_clean) in ocelot_fdma_tx_ring_free()
74 (tx_ring->next_to_use - tx_ring->next_to_clean) - 1; in ocelot_fdma_tx_ring_free()
76 return tx_ring->next_to_clean - tx_ring->next_to_use - 1; in ocelot_fdma_tx_ring_free()
81 struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring; in ocelot_fdma_tx_ring_empty() local
83 return tx_ring->next_to_clean == tx_ring->next_to_use; in ocelot_fdma_tx_ring_empty()
484 struct ocelot_fdma_tx_ring *tx_ring; in ocelot_fdma_tx_cleanup() local
495 tx_ring = &fdma->tx_ring; in ocelot_fdma_tx_cleanup()
501 ntc = tx_ring->next_to_clean; in ocelot_fdma_tx_cleanup()
502 dcb = &tx_ring->dcbs[ntc]; in ocelot_fdma_tx_cleanup()
[all …]
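ocelot_fdma_tx_ring_free() above computes the free space from in-range next_to_use/next_to_clean indices, always reserving one slot so that an empty ring (ntu == ntc) stays distinguishable from a full one. A standalone sketch:

    /* Illustrative helper mirroring the excerpt's two cases. */
    static int tx_ring_free(int ntu, int ntc, int count)
    {
        if (ntu >= ntc)
            return count - (ntu - ntc) - 1; /* free = count - occupancy - reserved */
        return ntc - ntu - 1;               /* ntu already wrapped below ntc */
    }

e.g. tx_ring_free(0, 0, 8) == 7: one of the eight slots is never used.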
/linux-5.19.10/drivers/net/ethernet/broadcom/
bcm4908_enet.c
74 struct bcm4908_enet_dma_ring tx_ring; member
187 struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring; in bcm4908_enet_dma_free() local
197 size = tx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd); in bcm4908_enet_dma_free()
198 if (tx_ring->cpu_addr) in bcm4908_enet_dma_free()
199 dma_free_coherent(dev, size, tx_ring->cpu_addr, tx_ring->dma_addr); in bcm4908_enet_dma_free()
200 kfree(tx_ring->slots); in bcm4908_enet_dma_free()
205 struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring; in bcm4908_enet_dma_alloc() local
210 tx_ring->length = ENET_TX_BDS_NUM; in bcm4908_enet_dma_alloc()
211 tx_ring->is_tx = 1; in bcm4908_enet_dma_alloc()
212 tx_ring->cfg_block = ENET_DMA_CH_TX_CFG; in bcm4908_enet_dma_alloc()
[all …]
/linux-5.19.10/drivers/net/ethernet/amazon/ena/
ena_netdev.c
69 static void ena_unmask_interrupt(struct ena_ring *tx_ring,
71 static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
73 static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
87 static void ena_ring_tx_doorbell(struct ena_ring *tx_ring) in ena_ring_tx_doorbell() argument
89 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); in ena_ring_tx_doorbell()
90 ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp); in ena_ring_tx_doorbell()
354 xdp_ring = &adapter->tx_ring[qid]; in ena_xdp_xmit()
689 txr = &adapter->tx_ring[i]; in ena_init_io_rings()
719 rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues]; in ena_init_io_rings()
732 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_setup_tx_resources() local
[all …]
/linux-5.19.10/drivers/net/ethernet/intel/ixgbevf/
ixgbevf_main.c
210 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring) in ixgbevf_check_tx_hang() argument
212 u32 tx_done = ixgbevf_get_tx_completed(tx_ring); in ixgbevf_check_tx_hang()
213 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in ixgbevf_check_tx_hang()
214 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring); in ixgbevf_check_tx_hang()
216 clear_check_for_tx_hang(tx_ring); in ixgbevf_check_tx_hang()
226 &tx_ring->state); in ixgbevf_check_tx_hang()
229 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state); in ixgbevf_check_tx_hang()
232 tx_ring->tx_stats.tx_done_old = tx_done; in ixgbevf_check_tx_hang()
265 struct ixgbevf_ring *tx_ring, int napi_budget) in ixgbevf_clean_tx_irq() argument
271 unsigned int budget = tx_ring->count / 2; in ixgbevf_clean_tx_irq()
[all …]
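ixgbevf_check_tx_hang() above reports a suspected hang when the completion count has not moved since the previous check while descriptors are still pending (the excerpt also shows an __IXGBEVF_HANG_CHECK_ARMED bit gating the check). A minimal sketch of the core heuristic:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative distillation: no progress since the last sample plus
     * pending work = suspected hang. The driver's real check carries
     * extra state (the armed bit, tx_done_old bookkeeping). */
    static bool tx_hang_suspected(uint32_t tx_done, uint32_t tx_done_old,
                                  uint32_t tx_pending)
    {
        return tx_done == tx_done_old && tx_pending != 0;
    }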
