Lines Matching refs:dp (each match references the driver's data-path state, struct nfp_net_dp *dp, passed in as an argument or reached through nn->dp)
17 void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) in nfp_net_rx_alloc_one() argument
21 if (!dp->xdp_prog) { in nfp_net_rx_alloc_one()
22 frag = netdev_alloc_frag(dp->fl_bufsz); in nfp_net_rx_alloc_one()
30 nn_dp_warn(dp, "Failed to alloc receive page frag\n"); in nfp_net_rx_alloc_one()
34 *dma_addr = nfp_net_dma_map_rx(dp, frag); in nfp_net_rx_alloc_one()
35 if (dma_mapping_error(dp->dev, *dma_addr)) { in nfp_net_rx_alloc_one()
36 nfp_net_free_frag(frag, dp->xdp_prog); in nfp_net_rx_alloc_one()
37 nn_dp_warn(dp, "Failed to map DMA RX buffer\n"); in nfp_net_rx_alloc_one()
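The matches above cover nfp_net_rx_alloc_one()'s non-XDP allocation branch and its two failure paths. A sketch of how they plausibly fit together follows; the XDP page-allocation branch, the early return on allocation failure, and the final return of the frag are not in the listing and are reconstructed assumptions.

void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
        void *frag;

        if (!dp->xdp_prog) {
                /* Normal datapath: page-frag buffer sized to the free-list
                 * buffer size precomputed in dp. */
                frag = netdev_alloc_frag(dp->fl_bufsz);
        } else {
                /* Assumption: with an XDP program attached, whole pages are
                 * used instead of page frags. */
                struct page *page = alloc_page(GFP_KERNEL);

                frag = page ? page_address(page) : NULL;
        }
        if (!frag) {
                nn_dp_warn(dp, "Failed to alloc receive page frag\n");
                return NULL;
        }

        *dma_addr = nfp_net_dma_map_rx(dp, frag);
        if (dma_mapping_error(dp->dev, *dma_addr)) {
                nfp_net_free_frag(frag, dp->xdp_prog);
                nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
                return NULL;
        }

        return frag;
}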
53 nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp, in nfp_net_tx_ring_init() argument
65 tx_ring->txrwb = dp->txrwb ? &dp->txrwb[idx] : NULL; in nfp_net_tx_ring_init()
132 nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp, in nfp_net_rx_ring_bufs_free() argument
137 if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) in nfp_net_rx_ring_bufs_free()
148 nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr); in nfp_net_rx_ring_bufs_free()
149 nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog); in nfp_net_rx_ring_bufs_free()
161 nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp, in nfp_net_rx_ring_bufs_alloc() argument
167 if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) in nfp_net_rx_ring_bufs_alloc()
173 rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr); in nfp_net_rx_ring_bufs_alloc()
175 nfp_net_rx_ring_bufs_free(dp, rx_ring); in nfp_net_rx_ring_bufs_alloc()
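nfp_net_rx_ring_bufs_alloc() and nfp_net_rx_ring_bufs_free() form an alloc/unwind pair: both skip rings served by an XSK buffer pool, and a failed allocation releases everything allocated so far. A sketch of the alloc side, assuming the loop bound, the rxbufs assignment, and the -ENOMEM return, none of which appear in the listing:

int nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
                               struct nfp_net_rx_ring *rx_ring)
{
        struct nfp_net_rx_buf *rxbufs;
        unsigned int i;

        /* AF_XDP zero-copy rings take their buffers from the XSK pool. */
        if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
                return 0;

        rxbufs = rx_ring->rxbufs;

        /* Assumption: one descriptor is kept unused so the ring never
         * looks completely full. */
        for (i = 0; i < rx_ring->cnt - 1; i++) {
                rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
                if (!rxbufs[i].frag) {
                        /* Unwind: unmap and free every frag allocated so far. */
                        nfp_net_rx_ring_bufs_free(dp, rx_ring);
                        return -ENOMEM;
                }
        }

        return 0;
}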
183 int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) in nfp_net_tx_rings_prepare() argument
187 dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings), in nfp_net_tx_rings_prepare()
189 if (!dp->tx_rings) in nfp_net_tx_rings_prepare()
192 if (dp->ctrl & NFP_NET_CFG_CTRL_TXRWB) { in nfp_net_tx_rings_prepare()
193 dp->txrwb = dma_alloc_coherent(dp->dev, in nfp_net_tx_rings_prepare()
194 dp->num_tx_rings * sizeof(u64), in nfp_net_tx_rings_prepare()
195 &dp->txrwb_dma, GFP_KERNEL); in nfp_net_tx_rings_prepare()
196 if (!dp->txrwb) in nfp_net_tx_rings_prepare()
200 for (r = 0; r < dp->num_tx_rings; r++) { in nfp_net_tx_rings_prepare()
203 if (r >= dp->num_stack_tx_rings) in nfp_net_tx_rings_prepare()
204 bias = dp->num_stack_tx_rings; in nfp_net_tx_rings_prepare()
206 nfp_net_tx_ring_init(&dp->tx_rings[r], dp, in nfp_net_tx_rings_prepare()
209 if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r])) in nfp_net_tx_rings_prepare()
212 if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r])) in nfp_net_tx_rings_prepare()
220 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); in nfp_net_tx_rings_prepare()
222 nfp_net_tx_ring_free(dp, &dp->tx_rings[r]); in nfp_net_tx_rings_prepare()
224 if (dp->txrwb) in nfp_net_tx_rings_prepare()
225 dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64), in nfp_net_tx_rings_prepare()
226 dp->txrwb, dp->txrwb_dma); in nfp_net_tx_rings_prepare()
228 kfree(dp->tx_rings); in nfp_net_tx_rings_prepare()
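Read together, the nfp_net_tx_rings_prepare() matches give the TX setup order: allocate the ring array, optionally allocate one coherent 64-bit TXRWB (TX write-back) slot per ring, then initialise and allocate each ring, unwinding on failure. In the sketch below, the trailing nfp_net_tx_ring_init() arguments, the goto labels, and the return values are assumptions; only the calls themselves appear in the listing.

int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
        unsigned int r;

        dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
                               GFP_KERNEL);
        if (!dp->tx_rings)
                return -ENOMEM;

        /* One u64 write-back slot per ring, in a single coherent
         * allocation, when the TXRWB capability is enabled. */
        if (dp->ctrl & NFP_NET_CFG_CTRL_TXRWB) {
                dp->txrwb = dma_alloc_coherent(dp->dev,
                                               dp->num_tx_rings * sizeof(u64),
                                               &dp->txrwb_dma, GFP_KERNEL);
                if (!dp->txrwb)
                        goto err_free_rings;
        }

        for (r = 0; r < dp->num_tx_rings; r++) {
                int bias = 0;

                /* XDP TX rings follow the stack's TX rings and share the
                 * stack rings' interrupt vectors. */
                if (r >= dp->num_stack_tx_rings)
                        bias = dp->num_stack_tx_rings;

                nfp_net_tx_ring_init(&dp->tx_rings[r], dp,
                                     &nn->r_vecs[r - bias], r, bias);

                if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
                        goto err_free_prev;

                if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
                        goto err_free_ring;
        }

        return 0;

err_free_prev:
        while (r--) {
                nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
err_free_ring:
                nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
        }
        if (dp->txrwb)
                dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
                                  dp->txrwb, dp->txrwb_dma);
err_free_rings:
        kfree(dp->tx_rings);
        return -ENOMEM;
}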
232 void nfp_net_tx_rings_free(struct nfp_net_dp *dp) in nfp_net_tx_rings_free() argument
236 for (r = 0; r < dp->num_tx_rings; r++) { in nfp_net_tx_rings_free()
237 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); in nfp_net_tx_rings_free()
238 nfp_net_tx_ring_free(dp, &dp->tx_rings[r]); in nfp_net_tx_rings_free()
241 if (dp->txrwb) in nfp_net_tx_rings_free()
242 dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64), in nfp_net_tx_rings_free()
243 dp->txrwb, dp->txrwb_dma); in nfp_net_tx_rings_free()
244 kfree(dp->tx_rings); in nfp_net_tx_rings_free()
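The matching teardown mirrors that error path: free each ring's buffers and descriptor memory, release the shared TXRWB area if it was allocated, then free the ring array. A compact sketch; only the braces and the loop counter declaration are added here.

void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
{
        unsigned int r;

        for (r = 0; r < dp->num_tx_rings; r++) {
                nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
                nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
        }

        if (dp->txrwb)
                dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
                                  dp->txrwb, dp->txrwb_dma);
        kfree(dp->tx_rings);
}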
254 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; in nfp_net_rx_ring_free() local
256 if (dp->netdev) in nfp_net_rx_ring_free()
259 if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) in nfp_net_rx_ring_free()
265 dma_free_coherent(dp->dev, rx_ring->size, in nfp_net_rx_ring_free()
284 nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) in nfp_net_rx_ring_alloc() argument
290 if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) { in nfp_net_rx_ring_alloc()
298 if (dp->netdev) { in nfp_net_rx_ring_alloc()
299 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, in nfp_net_rx_ring_alloc()
309 rx_ring->cnt = dp->rxd_cnt; in nfp_net_rx_ring_alloc()
311 rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size, in nfp_net_rx_ring_alloc()
315 …netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count:… in nfp_net_rx_ring_alloc()
320 if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) { in nfp_net_rx_ring_alloc()
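nfp_net_rx_ring_alloc() checks for an XSK pool twice: the first check (source line 290) selects the XDP memory model and buffer bookkeeping for the ring, the second (source line 320) guards the allocation of the per-descriptor buffer arrays. In between, the xdp_rxq info is registered when a netdev is attached and the descriptor ring itself is allocated coherently. The sketch below fills in the truncated xdp_rxq_info_reg() arguments, the size calculation, and a simplified error path; all of those details are assumptions.

static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
        int err;

        /* First XSK check (line 290): pick the memory model / bookkeeping
         * for an AF_XDP ring; details not shown in the listing. */

        if (dp->netdev) {
                /* Trailing arguments (queue index, NAPI id) assumed. */
                err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
                                       rx_ring->idx,
                                       rx_ring->r_vec->napi.napi_id);
                if (err < 0)
                        return err;
        }

        rx_ring->cnt = dp->rxd_cnt;
        rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
        rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
                                           &rx_ring->dma,
                                           GFP_KERNEL | __GFP_NOWARN);
        if (!rx_ring->rxds) {
                /* Warning text truncated in the listing above. */
                netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %u\n",
                            rx_ring->cnt);
                goto err_unreg;
        }

        /* Second XSK check (line 320): allocate the per-descriptor
         * rxbufs / XSK buffer arrays; omitted here. */

        return 0;

err_unreg:
        if (dp->netdev)
                xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
        return -ENOMEM;
}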
339 int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) in nfp_net_rx_rings_prepare() argument
343 dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings), in nfp_net_rx_rings_prepare()
345 if (!dp->rx_rings) in nfp_net_rx_rings_prepare()
348 for (r = 0; r < dp->num_rx_rings; r++) { in nfp_net_rx_rings_prepare()
349 nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r); in nfp_net_rx_rings_prepare()
351 if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r])) in nfp_net_rx_rings_prepare()
354 if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r])) in nfp_net_rx_rings_prepare()
362 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); in nfp_net_rx_rings_prepare()
364 nfp_net_rx_ring_free(&dp->rx_rings[r]); in nfp_net_rx_rings_prepare()
366 kfree(dp->rx_rings); in nfp_net_rx_rings_prepare()
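The RX prepare path mirrors the TX one, minus the TXRWB step: allocate the ring array, then initialise each ring against its interrupt vector, allocate its descriptors and buffers, and unwind on failure. A sketch, with the error labels and return values assumed:

int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
        unsigned int r;

        dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
                               GFP_KERNEL);
        if (!dp->rx_rings)
                return -ENOMEM;

        for (r = 0; r < dp->num_rx_rings; r++) {
                nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);

                if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
                        goto err_free_prev;

                if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
                        goto err_free_ring;
        }

        return 0;

err_free_prev:
        while (r--) {
                nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
err_free_ring:
                nfp_net_rx_ring_free(&dp->rx_rings[r]);
        }
        kfree(dp->rx_rings);
        return -ENOMEM;
}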
370 void nfp_net_rx_rings_free(struct nfp_net_dp *dp) in nfp_net_rx_rings_free() argument
374 for (r = 0; r < dp->num_rx_rings; r++) { in nfp_net_rx_rings_free()
375 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); in nfp_net_rx_rings_free()
376 nfp_net_rx_ring_free(&dp->rx_rings[r]); in nfp_net_rx_rings_free()
379 kfree(dp->rx_rings); in nfp_net_rx_rings_free()
400 nn->dp.txrwb_dma + idx * sizeof(u64)); in nfp_net_tx_ring_hw_cfg_write()
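The single nfp_net_tx_ring_hw_cfg_write() match shows how the shared TXRWB area reaches the hardware: each ring is programmed with the DMA address of its own u64 slot, offset idx * sizeof(u64) into the coherent buffer allocated in nfp_net_tx_rings_prepare(). The surrounding write helper and register macro in the snippet below are assumptions; only the address expression comes from the listing.

        /* Hypothetical surrounding code for the fragment above. */
        if (tx_ring->txrwb) {
                *tx_ring->txrwb = 0;    /* clear any stale write-back value */
                nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx),
                          nn->dp.txrwb_dma + idx * sizeof(u64));
        }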
422 return nn->dp.ops->xmit(skb, netdev); in nfp_net_tx()
429 return nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false); in __nfp_ctrl_tx()
438 ret = nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false); in nfp_ctrl_tx()
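The last three matches all dispatch through nn->dp.ops, the per-datapath operations table that lets different NFP datapath generations (the driver's NFD3 and NFDK backends) supply their own TX implementations. A minimal sketch of the two callbacks implied by these call sites; the struct layout beyond what the callers show, and the netdev_priv() lookup, are assumptions.

/* Assumed shape of the table referenced through nn->dp.ops. */
struct nfp_dp_ops {
        netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *netdev);
        bool (*ctrl_tx_one)(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
                            struct sk_buff *skb, bool old);
        /* ... further per-datapath callbacks not visible in this listing */
};

/* The stack TX entry point simply forwards to the active datapath. */
netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
        struct nfp_net *nn = netdev_priv(netdev);

        return nn->dp.ops->xmit(skb, netdev);
}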