Lines matching refs:tx_spare (all hits are in drivers/net/ethernet/hisilicon/hns3/hns3_enet.c: the hns3 driver's per-ring, DMA-mapped TX spare/bounce buffer)
963 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_space() local
969 ntc = smp_load_acquire(&tx_spare->last_to_clean); in hns3_tx_spare_space()
970 ntu = tx_spare->next_to_use; in hns3_tx_spare_space()
978 return max(ntc, tx_spare->len - ntu) - 1; in hns3_tx_spare_space()
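
hns3_tx_spare_space() is the producer-side space check: last_to_clean is read with an acquire so it pairs with the cleaner's release store (see hns3_tx_spare_update() below), and the result is the largest contiguous chunk that can be handed out. A minimal standalone model of that rule (a sketch, not driver code; ntc, ntu and len mirror the driver's fields):

/* Standalone model of the free-space rule in hns3_tx_spare_space(). */
#include <stdio.h>

static unsigned int tx_spare_space(unsigned int ntc, unsigned int ntu,
				   unsigned int len)
{
	/* Cleaner ahead of producer: one contiguous hole [ntu, ntc). */
	if (ntc > ntu)
		return ntc - ntu - 1;

	/*
	 * Otherwise the free space is split into [ntu, len) and [0, ntc);
	 * only a contiguous chunk can be handed out, so report the larger
	 * piece. The -1 keeps next_to_use from ever catching last_to_clean,
	 * which would make a full buffer look empty.
	 */
	unsigned int tail = len - ntu;

	return (ntc > tail ? ntc : tail) - 1;
}

int main(void)
{
	/* 4 KiB buffer, producer at 3000, cleaner at 100: the tail piece
	 * [3000, 4096) wins, so 1096 - 1 = 1095 bytes are usable. */
	printf("%u\n", tx_spare_space(100, 3000, 4096));
	return 0;
}
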
983 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_update() local
985 if (!tx_spare || in hns3_tx_spare_update()
986 tx_spare->last_to_clean == tx_spare->next_to_clean) in hns3_tx_spare_update()
992 smp_store_release(&tx_spare->last_to_clean, in hns3_tx_spare_update()
993 tx_spare->next_to_clean); in hns3_tx_spare_update()
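
hns3_tx_spare_update() is the consumer side of that pairing: next_to_clean is private to the cleaning path and is only published into last_to_clean once reclaim is finished, via smp_store_release(). A userspace analogue of the same release/acquire protocol, assuming C11 atomics as stand-ins for the kernel barriers:

/* Userspace analogue of the pairing; C11 atomics stand in for
 * smp_store_release()/smp_load_acquire(). Sketch only. */
#include <stdatomic.h>

struct tx_spare_model {
	_Atomic unsigned int last_to_clean; /* shared with the xmit path */
	unsigned int next_to_clean;         /* private to the clean path */
};

/* Clean path: publish reclaimed space. The release store makes every
 * write done during reclaim visible before the new index is seen. */
static void tx_spare_update(struct tx_spare_model *s)
{
	if (atomic_load_explicit(&s->last_to_clean,
				 memory_order_relaxed) == s->next_to_clean)
		return;

	atomic_store_explicit(&s->last_to_clean, s->next_to_clean,
			      memory_order_release);
}

/* Xmit path: this acquire load pairs with the release store above, so
 * any space it reports has really been reclaimed. */
static unsigned int tx_spare_read_ntc(struct tx_spare_model *s)
{
	return atomic_load_explicit(&s->last_to_clean,
				    memory_order_acquire);
}
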
1034 struct hns3_tx_spare *tx_spare; in hns3_init_tx_spare_buffer() local
1049 tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare), in hns3_init_tx_spare_buffer()
1051 if (!tx_spare) { in hns3_init_tx_spare_buffer()
1071 tx_spare->dma = dma; in hns3_init_tx_spare_buffer()
1072 tx_spare->buf = page_address(page); in hns3_init_tx_spare_buffer()
1073 tx_spare->len = PAGE_SIZE << order; in hns3_init_tx_spare_buffer()
1074 ring->tx_spare = tx_spare; in hns3_init_tx_spare_buffer()
1080 devm_kfree(ring_to_dev(ring), tx_spare); in hns3_init_tx_spare_buffer()
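
hns3_init_tx_spare_buffer() allocates one devm-managed bookkeeping struct plus a page-order-sized buffer that is DMA-mapped once for the ring's lifetime; line 1080 shows the struct being freed again when a later step fails. A rough userspace sketch of that setup shape, with calloc()/aligned_alloc() standing in for devm_kzalloc()/alloc_pages_node() and the DMA handle faked as the CPU address:

#include <stdint.h>
#include <stdlib.h>

struct tx_spare_model {
	uintptr_t dma;            /* device-visible address (faked below) */
	void *buf;                /* CPU-visible address */
	unsigned int len;         /* power-of-two multiple of 4 KiB */
	unsigned int next_to_use;
	unsigned int next_to_clean;
	unsigned int last_to_clean;
};

static struct tx_spare_model *init_tx_spare(unsigned int alloc_size)
{
	struct tx_spare_model *s;
	unsigned int len = 4096;

	if (!alloc_size)
		return NULL;      /* the driver also runs without one */

	/* Round up to a power-of-two page count, roughly what
	 * get_order() and (PAGE_SIZE << order) produce in the driver. */
	while (len < alloc_size)
		len <<= 1;

	s = calloc(1, sizeof(*s));
	if (!s)
		return NULL;

	s->buf = aligned_alloc(4096, len);
	if (!s->buf) {
		free(s);          /* like the devm_kfree() at line 1080 */
		return NULL;
	}

	s->dma = (uintptr_t)s->buf; /* fake: no IOMMU in this model */
	s->len = len;
	return s;
}
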
1092 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_alloc() local
1093 u32 ntu = tx_spare->next_to_use; in hns3_tx_spare_alloc()
1101 if (ntu + size > tx_spare->len) { in hns3_tx_spare_alloc()
1102 *cb_len += (tx_spare->len - ntu); in hns3_tx_spare_alloc()
1106 tx_spare->next_to_use = ntu + size; in hns3_tx_spare_alloc()
1107 if (tx_spare->next_to_use == tx_spare->len) in hns3_tx_spare_alloc()
1108 tx_spare->next_to_use = 0; in hns3_tx_spare_alloc()
1110 *dma = tx_spare->dma + ntu; in hns3_tx_spare_alloc()
1112 return tx_spare->buf + ntu; in hns3_tx_spare_alloc()
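
hns3_tx_spare_alloc() only ever returns a contiguous chunk: when the request does not fit in the tail [ntu, len), it wraps to offset 0 and charges the skipped tail bytes to *cb_len, so that rollback and reclaim later advance their indices by the same total. A model of that arithmetic (sketch; the 64-byte alignment is an assumption standing in for ALIGN(size, dma_get_cache_alignment()), and the caller is assumed to have checked hns3_tx_spare_space() first):

#include <stdint.h>

#define SPARE_ALIGN 64u /* assumed cache-line alignment */

struct tx_spare_model {
	uintptr_t dma;
	void *buf;
	unsigned int len;
	unsigned int next_to_use;
};

static void *tx_spare_alloc(struct tx_spare_model *s, unsigned int size,
			    uintptr_t *dma, unsigned int *cb_len)
{
	unsigned int ntu = s->next_to_use;

	size = (size + SPARE_ALIGN - 1) & ~(SPARE_ALIGN - 1);
	*cb_len = size;

	/*
	 * Not enough room left at the tail: wrap to offset 0 and charge
	 * the skipped tail bytes to *cb_len, so rollback and reclaim can
	 * move their indices past the dead tail too.
	 */
	if (ntu + size > s->len) {
		*cb_len += s->len - ntu;
		ntu = 0;
	}

	s->next_to_use = ntu + size;
	if (s->next_to_use == s->len)
		s->next_to_use = 0;

	*dma = s->dma + ntu;
	return (char *)s->buf + ntu;
}
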
1117 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_rollback() local
1119 if (len > tx_spare->next_to_use) { in hns3_tx_spare_rollback()
1120 len -= tx_spare->next_to_use; in hns3_tx_spare_rollback()
1121 tx_spare->next_to_use = tx_spare->len - len; in hns3_tx_spare_rollback()
1123 tx_spare->next_to_use -= len; in hns3_tx_spare_rollback()
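
hns3_tx_spare_rollback() undoes an allocation when descriptor filling fails afterwards. Because the len passed in is the cb_len returned by the allocator, the wrapped case (len > next_to_use) also gives back the skipped tail and lands next_to_use exactly where it was before the wrap. The same logic in the standalone model:

struct spare_idx {
	unsigned int next_to_use;
	unsigned int len;
};

static void tx_spare_rollback(struct spare_idx *s, unsigned int len)
{
	if (len > s->next_to_use) {
		/* The allocation wrapped: give back the bytes taken from
		 * offset 0, then the skipped tail, restoring next_to_use
		 * to its pre-wrap position in the tail region. */
		len -= s->next_to_use;
		s->next_to_use = s->len - len;
	} else {
		s->next_to_use -= len;
	}
}
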
1130 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_reclaim_cb() local
1131 u32 ntc = tx_spare->next_to_clean; in hns3_tx_spare_reclaim_cb()
1134 tx_spare->next_to_clean += len; in hns3_tx_spare_reclaim_cb()
1136 if (tx_spare->next_to_clean >= tx_spare->len) { in hns3_tx_spare_reclaim_cb()
1137 tx_spare->next_to_clean -= tx_spare->len; in hns3_tx_spare_reclaim_cb()
1139 if (tx_spare->next_to_clean) { in hns3_tx_spare_reclaim_cb()
1141 len = tx_spare->next_to_clean; in hns3_tx_spare_reclaim_cb()
1151 dma_addr_t dma = tx_spare->dma + ntc; in hns3_tx_spare_reclaim_cb()
1156 struct sg_table *sgt = tx_spare->buf + ntc; in hns3_tx_spare_reclaim_cb()
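
hns3_tx_spare_reclaim_cb() advances next_to_clean by the cb_len recorded at allocation time; when that wraps with a remainder, the payload of the wrapped allocation actually lives at offset 0 (the skipped tail holds no data), so only [0, remainder) needs the dma_sync_single_for_cpu() or dma_unmap_sg() shown at lines 1151 and 1156. A sketch of the index arithmetic that reports the region the driver would sync or unmap:

struct spare_idx {
	unsigned int next_to_clean;
	unsigned int len;
};

/* Advances next_to_clean and returns, via *off/*sync_len, the region
 * the driver would pass to the DMA sync/unmap calls above. */
static void tx_spare_reclaim(struct spare_idx *s, unsigned int cb_len,
			     unsigned int *off, unsigned int *sync_len)
{
	unsigned int ntc = s->next_to_clean;
	unsigned int len = cb_len;

	s->next_to_clean += cb_len;

	if (s->next_to_clean >= s->len) {
		s->next_to_clean -= s->len;

		/* Wrapped with a remainder: this allocation's payload
		 * starts at offset 0, so that is all that needs syncing. */
		if (s->next_to_clean) {
			ntc = 0;
			len = s->next_to_clean;
		}
	}

	*off = ntc;
	*sync_len = len;
}
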
2249 if (!ring->tx_spare) in hns3_handle_desc_filling()
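
Line 2249 shows that the spare buffer is optional: if it was never set up (or init failed), hns3_handle_desc_filling() skips the bounce/SGL paths entirely. A sketch of that decision shape only; fill_via_spare() and fill_direct() are hypothetical placeholders, not driver functions:

struct tx_spare_model; /* opaque here; see the sketches above */

struct ring_model {
	struct tx_spare_model *tx_spare; /* NULL: init failed or disabled */
};

/* Hypothetical stand-ins for the driver's bounce/SGL and direct paths. */
static int fill_via_spare(struct ring_model *r, void *pkt) { (void)r; (void)pkt; return 0; }
static int fill_direct(struct ring_model *r, void *pkt)    { (void)r; (void)pkt; return 0; }

static int handle_desc_filling(struct ring_model *ring, void *pkt)
{
	/* No spare buffer: take the ordinary per-fragment mapping path. */
	if (!ring->tx_spare)
		return fill_direct(ring, pkt);

	return fill_via_spare(ring, pkt);
}
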
5000 } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) { in hns3_fini_ring()
5001 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_fini_ring() local
5003 dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len, in hns3_fini_ring()
5005 free_pages((unsigned long)tx_spare->buf, in hns3_fini_ring()
5006 get_order(tx_spare->len)); in hns3_fini_ring()
5007 devm_kfree(ring_to_dev(ring), tx_spare); in hns3_fini_ring()
5008 ring->tx_spare = NULL; in hns3_fini_ring()
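
hns3_fini_ring() tears the spare buffer down in reverse order of setup (DMA unmap, free the pages, free the devm struct) and clears ring->tx_spare so paths like the check at line 2249 fall back cleanly. The counterpart in the userspace model, where the fake DMA handle needs no unmap:

#include <stdint.h>
#include <stdlib.h>

struct tx_spare_model {
	uintptr_t dma;
	void *buf;
	unsigned int len;
};

static void fini_tx_spare(struct tx_spare_model **sp)
{
	struct tx_spare_model *s = *sp;

	if (!s)
		return;

	free(s->buf); /* stands in for dma_unmap_page() + free_pages() */
	free(s);      /* stands in for devm_kfree() */
	*sp = NULL;   /* mirrors ring->tx_spare = NULL */
}
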