Lines Matching refs:tx_ring

210 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)  in ixgbevf_check_tx_hang()  argument
212 u32 tx_done = ixgbevf_get_tx_completed(tx_ring); in ixgbevf_check_tx_hang()
213 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in ixgbevf_check_tx_hang()
214 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring); in ixgbevf_check_tx_hang()
216 clear_check_for_tx_hang(tx_ring); in ixgbevf_check_tx_hang()
226 &tx_ring->state); in ixgbevf_check_tx_hang()
229 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state); in ixgbevf_check_tx_hang()
232 tx_ring->tx_stats.tx_done_old = tx_done; in ixgbevf_check_tx_hang()
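
The ixgbevf_check_tx_hang() fragments above implement a two-strike heuristic: if no descriptor has completed since the previous check while work is still pending, the ring is armed, and only a second stalled check in a row reports a hang. Below is a minimal userspace model of that decision; the toy_tx_ring struct and toy_check_tx_hang() helper are invented stand-ins for the driver's state bit and tx_stats fields, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

struct toy_tx_ring {
    unsigned int tx_done_old;   /* completed count seen at the previous check */
    bool hang_check_armed;      /* stand-in for the HANG_CHECK_ARMED ring bit  */
};

static bool toy_check_tx_hang(struct toy_tx_ring *ring,
                              unsigned int tx_done, unsigned int tx_pending)
{
    if (ring->tx_done_old == tx_done && tx_pending) {
        /* no progress since the last check: report a hang only if we
         * were already armed, i.e. this is the second stalled check */
        bool was_armed = ring->hang_check_armed;

        ring->hang_check_armed = true;
        return was_armed;
    }

    /* progress was made (or nothing is pending): remember the new
     * completed count and disarm the countdown */
    ring->tx_done_old = tx_done;
    ring->hang_check_armed = false;
    return false;
}

int main(void)
{
    struct toy_tx_ring ring = { .tx_done_old = 0, .hang_check_armed = false };

    printf("%d\n", toy_check_tx_hang(&ring, 10, 5)); /* progress      -> 0 */
    printf("%d\n", toy_check_tx_hang(&ring, 10, 5)); /* stalled, arm  -> 0 */
    printf("%d\n", toy_check_tx_hang(&ring, 10, 5)); /* still stalled -> 1 */
    return 0;
}
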
265 struct ixgbevf_ring *tx_ring, int napi_budget) in ixgbevf_clean_tx_irq() argument
271 unsigned int budget = tx_ring->count / 2; in ixgbevf_clean_tx_irq()
272 unsigned int i = tx_ring->next_to_clean; in ixgbevf_clean_tx_irq()
277 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_clean_tx_irq()
278 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); in ixgbevf_clean_tx_irq()
279 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
305 if (ring_is_xdp(tx_ring)) in ixgbevf_clean_tx_irq()
311 dma_unmap_single(tx_ring->dev, in ixgbevf_clean_tx_irq()
325 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
326 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_irq()
327 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); in ixgbevf_clean_tx_irq()
332 dma_unmap_page(tx_ring->dev, in ixgbevf_clean_tx_irq()
345 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
346 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_irq()
347 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); in ixgbevf_clean_tx_irq()
357 i += tx_ring->count; in ixgbevf_clean_tx_irq()
358 tx_ring->next_to_clean = i; in ixgbevf_clean_tx_irq()
359 u64_stats_update_begin(&tx_ring->syncp); in ixgbevf_clean_tx_irq()
360 tx_ring->stats.bytes += total_bytes; in ixgbevf_clean_tx_irq()
361 tx_ring->stats.packets += total_packets; in ixgbevf_clean_tx_irq()
362 u64_stats_update_end(&tx_ring->syncp); in ixgbevf_clean_tx_irq()
367 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { in ixgbevf_clean_tx_irq()
371 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch; in ixgbevf_clean_tx_irq()
383 ring_is_xdp(tx_ring) ? " XDP" : "", in ixgbevf_clean_tx_irq()
384 tx_ring->queue_index, in ixgbevf_clean_tx_irq()
385 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)), in ixgbevf_clean_tx_irq()
386 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)), in ixgbevf_clean_tx_irq()
387 tx_ring->next_to_use, i, in ixgbevf_clean_tx_irq()
389 tx_ring->tx_buffer_info[i].time_stamp, jiffies); in ixgbevf_clean_tx_irq()
391 if (!ring_is_xdp(tx_ring)) in ixgbevf_clean_tx_irq()
392 netif_stop_subqueue(tx_ring->netdev, in ixgbevf_clean_tx_irq()
393 tx_ring->queue_index); in ixgbevf_clean_tx_irq()
401 if (ring_is_xdp(tx_ring)) in ixgbevf_clean_tx_irq()
405 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in ixgbevf_clean_tx_irq()
406 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { in ixgbevf_clean_tx_irq()
412 if (__netif_subqueue_stopped(tx_ring->netdev, in ixgbevf_clean_tx_irq()
413 tx_ring->queue_index) && in ixgbevf_clean_tx_irq()
415 netif_wake_subqueue(tx_ring->netdev, in ixgbevf_clean_tx_irq()
416 tx_ring->queue_index); in ixgbevf_clean_tx_irq()
417 ++tx_ring->tx_stats.restart_queue; in ixgbevf_clean_tx_irq()
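
A detail worth noting in the ixgbevf_clean_tx_irq() lines above is the index arithmetic: i is biased by -tx_ring->count before the loop and re-biased to the ring start whenever it reaches zero, so the wrap test in the hot loop is a cheap compare against zero, and the bias is added back before next_to_clean is stored. The standalone demo below reproduces that trick; the 8-entry ring and the five-step walk are illustrative values only.

#include <stdio.h>

#define RING_SIZE 8u

int main(void)
{
    unsigned int next_to_clean = 6;   /* pretend cleanup starts at entry 6 */
    unsigned int i = next_to_clean;
    unsigned int steps = 5;           /* clean five descriptors */

    i -= RING_SIZE;                   /* bias: i now holds "index - count" */
    do {
        printf("cleaning index %u\n", i + RING_SIZE);
        i++;
        if (!i)                       /* walked past the last entry...     */
            i -= RING_SIZE;           /* ...so re-bias to the ring start   */
    } while (--steps);

    i += RING_SIZE;                   /* undo the bias */
    next_to_clean = i;
    printf("next_to_clean = %u\n", next_to_clean);
    return 0;
}
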
1760 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]); in ixgbevf_configure_tx()
2196 adapter->tx_ring[0]->reg_idx = def_q; in ixgbevf_configure_dcb()
2399 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring) in ixgbevf_clean_tx_ring() argument
2401 u16 i = tx_ring->next_to_clean; in ixgbevf_clean_tx_ring()
2402 struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_clean_tx_ring()
2404 while (i != tx_ring->next_to_use) { in ixgbevf_clean_tx_ring()
2408 if (ring_is_xdp(tx_ring)) in ixgbevf_clean_tx_ring()
2414 dma_unmap_single(tx_ring->dev, in ixgbevf_clean_tx_ring()
2421 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); in ixgbevf_clean_tx_ring()
2428 if (unlikely(i == tx_ring->count)) { in ixgbevf_clean_tx_ring()
2430 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_ring()
2431 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); in ixgbevf_clean_tx_ring()
2436 dma_unmap_page(tx_ring->dev, in ixgbevf_clean_tx_ring()
2445 if (unlikely(i == tx_ring->count)) { in ixgbevf_clean_tx_ring()
2447 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_ring()
2452 tx_ring->next_to_use = 0; in ixgbevf_clean_tx_ring()
2453 tx_ring->next_to_clean = 0; in ixgbevf_clean_tx_ring()
2478 ixgbevf_clean_tx_ring(adapter->tx_ring[i]); in ixgbevf_clean_all_tx_rings()
2513 u8 reg_idx = adapter->tx_ring[i]->reg_idx; in ixgbevf_down()
2763 adapter->tx_ring[txr_idx] = ring; in ixgbevf_alloc_q_vector()
2851 adapter->tx_ring[ring->queue_index] = NULL; in ixgbevf_free_q_vector()
3223 set_check_for_tx_hang(adapter->tx_ring[i]); in ixgbevf_check_hang_subtask()
3367 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring) in ixgbevf_free_tx_resources() argument
3369 ixgbevf_clean_tx_ring(tx_ring); in ixgbevf_free_tx_resources()
3371 vfree(tx_ring->tx_buffer_info); in ixgbevf_free_tx_resources()
3372 tx_ring->tx_buffer_info = NULL; in ixgbevf_free_tx_resources()
3375 if (!tx_ring->desc) in ixgbevf_free_tx_resources()
3378 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, in ixgbevf_free_tx_resources()
3379 tx_ring->dma); in ixgbevf_free_tx_resources()
3381 tx_ring->desc = NULL; in ixgbevf_free_tx_resources()
3395 if (adapter->tx_ring[i]->desc) in ixgbevf_free_all_tx_resources()
3396 ixgbevf_free_tx_resources(adapter->tx_ring[i]); in ixgbevf_free_all_tx_resources()
3408 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) in ixgbevf_setup_tx_resources() argument
3410 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); in ixgbevf_setup_tx_resources()
3413 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; in ixgbevf_setup_tx_resources()
3414 tx_ring->tx_buffer_info = vmalloc(size); in ixgbevf_setup_tx_resources()
3415 if (!tx_ring->tx_buffer_info) in ixgbevf_setup_tx_resources()
3418 u64_stats_init(&tx_ring->syncp); in ixgbevf_setup_tx_resources()
3421 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); in ixgbevf_setup_tx_resources()
3422 tx_ring->size = ALIGN(tx_ring->size, 4096); in ixgbevf_setup_tx_resources()
3424 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size, in ixgbevf_setup_tx_resources()
3425 &tx_ring->dma, GFP_KERNEL); in ixgbevf_setup_tx_resources()
3426 if (!tx_ring->desc) in ixgbevf_setup_tx_resources()
3432 vfree(tx_ring->tx_buffer_info); in ixgbevf_setup_tx_resources()
3433 tx_ring->tx_buffer_info = NULL; in ixgbevf_setup_tx_resources()
3453 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); in ixgbevf_setup_all_tx_resources()
3474 ixgbevf_free_tx_resources(adapter->tx_ring[i]); in ixgbevf_setup_all_tx_resources()
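
From the ixgbevf_setup_tx_resources() lines above: the software-side tx_buffer_info array comes from vmalloc(), while the hardware-visible descriptor ring is sized as count descriptors and rounded up to a 4 KiB multiple before dma_alloc_coherent(). The small standalone check below walks through that sizing, assuming the usual 16-byte advanced Tx descriptor and using a local copy of the kernel's ALIGN() round-up.

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
    unsigned long count = 512;                 /* a typical ring size */
    unsigned long desc_size = 16;              /* sizeof(union ixgbe_adv_tx_desc) */
    unsigned long size = count * desc_size;    /* 8192 bytes */

    size = ALIGN(size, 4096);                  /* already a multiple -> unchanged */
    printf("512 descriptors -> %lu bytes\n", size);

    size = ALIGN(100 * desc_size, 4096);       /* 1600 -> rounded up to 4096 */
    printf("100 descriptors -> %lu bytes\n", size);
    return 0;
}
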
3743 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, in ixgbevf_tx_ctxtdesc() argument
3748 u16 i = tx_ring->next_to_use; in ixgbevf_tx_ctxtdesc()
3750 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); in ixgbevf_tx_ctxtdesc()
3753 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ixgbevf_tx_ctxtdesc()
3764 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, in ixgbevf_tso() argument
3855 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, in ixgbevf_tso()
3861 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, in ixgbevf_tx_csum() argument
3906 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, in ixgbevf_tx_csum()
3957 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, in ixgbevf_tx_map() argument
3969 u16 i = tx_ring->next_to_use; in ixgbevf_tx_map()
3971 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); in ixgbevf_tx_map()
3978 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ixgbevf_tx_map()
3983 if (dma_mapping_error(tx_ring->dev, dma)) in ixgbevf_tx_map()
3998 if (i == tx_ring->count) { in ixgbevf_tx_map()
3999 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); in ixgbevf_tx_map()
4017 if (i == tx_ring->count) { in ixgbevf_tx_map()
4018 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); in ixgbevf_tx_map()
4026 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ixgbevf_tx_map()
4029 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_tx_map()
4054 if (i == tx_ring->count) in ixgbevf_tx_map()
4057 tx_ring->next_to_use = i; in ixgbevf_tx_map()
4060 ixgbevf_write_tail(tx_ring, i); in ixgbevf_tx_map()
4064 dev_err(tx_ring->dev, "TX DMA map failed\n"); in ixgbevf_tx_map()
4065 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_tx_map()
4070 dma_unmap_page(tx_ring->dev, in ixgbevf_tx_map()
4077 i += tx_ring->count; in ixgbevf_tx_map()
4078 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_tx_map()
4082 dma_unmap_single(tx_ring->dev, in ixgbevf_tx_map()
4091 tx_ring->next_to_use = i; in ixgbevf_tx_map()
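
The tail of ixgbevf_tx_map() above is its DMA error-unwind path: if a mapping fails partway through a multi-fragment frame, the driver walks backwards from the failing slot to the frame's first buffer, releasing the page mappings of the fragments and then the single mapping of the skb head, and restores next_to_use. The toy below models only that backward walk; the 8-entry ring and the mapping-kind bookkeeping are invented for illustration.

#include <stdio.h>

#define RING 8u

enum map_kind { UNMAPPED = 0, MAP_SINGLE, MAP_PAGE };

int main(void)
{
    enum map_kind slot[RING] = { UNMAPPED };
    unsigned int start = 6;          /* next_to_use when the frame began  */
    unsigned int i = start;

    slot[i] = MAP_SINGLE;            /* skb->data: dma_map_single()       */
    i = (i + 1) % RING;
    slot[i] = MAP_PAGE;              /* frag 0: skb_frag_dma_map()        */
    i = (i + 1) % RING;
    /* frag 1 fails to map here -> unwind everything mapped so far */

    while (i != start) {
        i = (i + RING - 1) % RING;   /* step back, handling the wrap      */
        printf("slot %u: %s\n", i,
               slot[i] == MAP_PAGE ? "dma_unmap_page" : "dma_unmap_single");
        slot[i] = UNMAPPED;
    }
    printf("next_to_use restored to %u\n", start);
    return 0;
}
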
4094 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) in __ixgbevf_maybe_stop_tx() argument
4096 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbevf_maybe_stop_tx()
4106 if (likely(ixgbevf_desc_unused(tx_ring) < size)) in __ixgbevf_maybe_stop_tx()
4110 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbevf_maybe_stop_tx()
4111 ++tx_ring->tx_stats.restart_queue; in __ixgbevf_maybe_stop_tx()
4116 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) in ixgbevf_maybe_stop_tx() argument
4118 if (likely(ixgbevf_desc_unused(tx_ring) >= size)) in ixgbevf_maybe_stop_tx()
4120 return __ixgbevf_maybe_stop_tx(tx_ring, size); in ixgbevf_maybe_stop_tx()
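
ixgbevf_maybe_stop_tx() and __ixgbevf_maybe_stop_tx() above follow the usual Tx flow-control pattern: a cheap fast-path space check, plus a slow path that stops the subqueue, re-checks, and restarts the queue (bumping restart_queue) if the cleanup side freed descriptors in the meantime. The toy below models that control flow only; the real memory barrier and netif_* calls are omitted, and the -1 return stands in for -EBUSY.

#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
    unsigned int unused_desc;   /* descriptors currently free          */
    bool stopped;               /* stand-in for netif_stop_subqueue()  */
    unsigned int restart_count; /* mirrors tx_stats.restart_queue      */
};

static int toy_maybe_stop_tx_slow(struct toy_queue *q, unsigned int need)
{
    q->stopped = true;                 /* pessimistically stop the queue */

    /* The cleanup side may have freed descriptors between the caller's
     * check and the stop; re-check before giving up. */
    if (q->unused_desc < need)
        return -1;                     /* genuinely out of space */

    q->stopped = false;                /* space appeared: restart */
    q->restart_count++;
    return 0;
}

static int toy_maybe_stop_tx(struct toy_queue *q, unsigned int need)
{
    if (q->unused_desc >= need)        /* fast path: plenty of room */
        return 0;
    return toy_maybe_stop_tx_slow(q, need);
}

int main(void)
{
    struct toy_queue q = { .unused_desc = 4 };

    printf("need 3 -> %d (stopped=%d)\n", toy_maybe_stop_tx(&q, 3), q.stopped);
    printf("need 8 -> %d (stopped=%d)\n", toy_maybe_stop_tx(&q, 8), q.stopped);
    return 0;
}
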
4124 struct ixgbevf_ring *tx_ring) in ixgbevf_xmit_frame_ring() argument
4157 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { in ixgbevf_xmit_frame_ring()
4158 tx_ring->tx_stats.tx_busy++; in ixgbevf_xmit_frame_ring()
4163 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in ixgbevf_xmit_frame_ring()
4179 if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) in ixgbevf_xmit_frame_ring()
4182 tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx); in ixgbevf_xmit_frame_ring()
4186 ixgbevf_tx_csum(tx_ring, first, &ipsec_tx); in ixgbevf_xmit_frame_ring()
4188 ixgbevf_tx_map(tx_ring, first, hdr_len); in ixgbevf_xmit_frame_ring()
4190 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); in ixgbevf_xmit_frame_ring()
4204 struct ixgbevf_ring *tx_ring; in ixgbevf_xmit_frame() local
4220 tx_ring = adapter->tx_ring[skb->queue_mapping]; in ixgbevf_xmit_frame()
4221 return ixgbevf_xmit_frame_ring(skb, tx_ring); in ixgbevf_xmit_frame()
4388 ring = adapter->tx_ring[i]; in ixgbevf_get_stats()