// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */

#include "nfp_app.h"
#include "nfp_net_dp.h"
#include "nfp_net_xsk.h"

/**
 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
 * @dp: NFP Net data path struct
 * @dma_addr: Pointer to storage for DMA address (output param)
 *
 * This function will allocate a new page frag and map it for DMA.
 *
 * Return: allocated page frag or NULL on failure.
 */
void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

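	/* With an XDP program attached each RX buffer is backed by its own
	 * order-0 page; otherwise a page frag of dp->fl_bufsz is allocated.
	 */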
	if (!dp->xdp_prog) {
		frag = netdev_alloc_frag(dp->fl_bufsz);
	} else {
		struct page *page;

		page = alloc_page(GFP_KERNEL);
		frag = page ? page_address(page) : NULL;
	}
	if (!frag) {
		nn_dp_warn(dp, "Failed to alloc receive page frag\n");
		return NULL;
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}

/**
 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
 * @tx_ring: TX ring structure
 * @dp: NFP Net data path struct
 * @r_vec: IRQ vector servicing this ring
 * @idx: Ring index
 * @is_xdp: Is this an XDP TX ring?
 */
static void
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp,
		     struct nfp_net_r_vector *r_vec, unsigned int idx,
		     bool is_xdp)
{
	struct nfp_net *nn = r_vec->nfp_net;

	tx_ring->idx = idx;
	tx_ring->r_vec = r_vec;
	tx_ring->is_xdp = is_xdp;
	u64_stats_init(&tx_ring->r_vec->tx_sync);

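	/* The ring's queue controller index is its ring index scaled by the
	 * TX stride; qcp_q points at that queue's slot in the TX BAR.
	 */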
	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
	tx_ring->txrwb = dp->txrwb ? &dp->txrwb[idx] : NULL;
	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}

/**
 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
 * @rx_ring: RX ring structure
 * @r_vec: IRQ vector servicing this ring
 * @idx: Ring index
 */
static void
nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx)
{
	struct nfp_net *nn = r_vec->nfp_net;

	rx_ring->idx = idx;
	rx_ring->r_vec = r_vec;
	u64_stats_init(&rx_ring->r_vec->rx_sync);

	rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
	rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
}

/**
 * nfp_net_rx_ring_reset() - Reflect in SW the state of the freelist after disable
 * @rx_ring: RX ring structure
 *
 * Assumes that the device is stopped, must be idempotent.
 */
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
	unsigned int wr_idx, last_idx;

	/* wr_p == rd_p means ring was never fed FL bufs. RX rings are always
	 * kept at cnt - 1 FL bufs.
	 */
	if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
		return;

	/* Move the empty entry to the end of the list */
	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
	last_idx = rx_ring->cnt - 1;
	if (rx_ring->r_vec->xsk_pool) {
		rx_ring->xsk_rxbufs[wr_idx] = rx_ring->xsk_rxbufs[last_idx];
		memset(&rx_ring->xsk_rxbufs[last_idx], 0,
		       sizeof(*rx_ring->xsk_rxbufs));
	} else {
		rx_ring->rxbufs[wr_idx] = rx_ring->rxbufs[last_idx];
		memset(&rx_ring->rxbufs[last_idx], 0, sizeof(*rx_ring->rxbufs));
	}

	memset(rx_ring->rxds, 0, rx_ring->size);
	rx_ring->wr_p = 0;
	rx_ring->rd_p = 0;
}

/**
 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
 * @dp: NFP Net data path struct
 * @rx_ring: RX ring to remove buffers from
 *
 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
 * entries. After device is disabled nfp_net_rx_ring_reset() must be called
 * to restore required ring geometry.
 */
static void
nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
			  struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		/* A NULL frag can only happen when the initial filling of
		 * the ring fails to allocate enough buffers and this is
		 * called to free the ones already allocated.
		 */
		if (!rx_ring->rxbufs[i].frag)
			continue;

		nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
		nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
		rx_ring->rxbufs[i].dma_addr = 0;
		rx_ring->rxbufs[i].frag = NULL;
	}
}

/**
 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
 * @dp: NFP Net data path struct
 * @rx_ring: RX ring to fill with buffers
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
			   struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_rx_buf *rxbufs;
	unsigned int i;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return 0;

	rxbufs = rx_ring->rxbufs;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
		if (!rxbufs[i].frag) {
			nfp_net_rx_ring_bufs_free(dp, rx_ring);
			return -ENOMEM;
		}
	}

	return 0;
}

int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;

	dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
			       GFP_KERNEL);
	if (!dp->tx_rings)
		return -ENOMEM;

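	/* With TX write-back enabled, allocate one 64-bit write-back slot
	 * per TX ring in a single coherent buffer.
	 */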
	if (dp->ctrl & NFP_NET_CFG_CTRL_TXRWB) {
		dp->txrwb = dma_alloc_coherent(dp->dev,
					       dp->num_tx_rings * sizeof(u64),
					       &dp->txrwb_dma, GFP_KERNEL);
		if (!dp->txrwb)
			goto err_free_rings;
	}

	for (r = 0; r < dp->num_tx_rings; r++) {
		int bias = 0;

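		/* Rings beyond the stack TX rings are XDP TX rings; they
		 * share r_vecs with the corresponding stack rings, and a
		 * non-zero bias doubles as the is_xdp flag.
		 */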
		if (r >= dp->num_stack_tx_rings)
			bias = dp->num_stack_tx_rings;

		nfp_net_tx_ring_init(&dp->tx_rings[r], dp,
				     &nn->r_vecs[r - bias], r, bias);

		if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
			goto err_free_prev;

		if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
			goto err_free_ring;
	}

	return 0;

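	/* Unwind in reverse order. Jumping to err_free_ring frees the
	 * descriptor ring of the entry whose buffer allocation failed before
	 * the loop releases all previously set up rings.
	 */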
err_free_prev:
	while (r--) {
		nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
err_free_ring:
		nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
	}
	if (dp->txrwb)
		dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
				  dp->txrwb, dp->txrwb_dma);
err_free_rings:
	kfree(dp->tx_rings);
	return -ENOMEM;
}

void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
{
	unsigned int r;

	for (r = 0; r < dp->num_tx_rings; r++) {
		nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
		nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
	}

	if (dp->txrwb)
		dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
				  dp->txrwb, dp->txrwb_dma);
	kfree(dp->tx_rings);
}

/**
 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
 * @rx_ring: RX ring to free
 */
static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

	if (dp->netdev)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		kvfree(rx_ring->xsk_rxbufs);
	else
		kvfree(rx_ring->rxbufs);

	if (rx_ring->rxds)
		dma_free_coherent(dp->dev, rx_ring->size,
				  rx_ring->rxds, rx_ring->dma);

	rx_ring->cnt = 0;
	rx_ring->rxbufs = NULL;
	rx_ring->xsk_rxbufs = NULL;
	rx_ring->rxds = NULL;
	rx_ring->dma = 0;
	rx_ring->size = 0;
}

/**
 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
 * @dp: NFP Net data path struct
 * @rx_ring: RX ring to allocate
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
	enum xdp_mem_type mem_type;
	size_t rxbuf_sw_desc_sz;
	int err;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
		mem_type = MEM_TYPE_XSK_BUFF_POOL;
		rxbuf_sw_desc_sz = sizeof(*rx_ring->xsk_rxbufs);
	} else {
		mem_type = MEM_TYPE_PAGE_ORDER0;
		rxbuf_sw_desc_sz = sizeof(*rx_ring->rxbufs);
	}

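	/* Control vNICs have no netdev, so only data vNICs register XDP RX
	 * queue info.
	 */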
	if (dp->netdev) {
		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
				       rx_ring->idx, rx_ring->r_vec->napi.napi_id);
		if (err < 0)
			return err;

		err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, mem_type, NULL);
		if (err)
			goto err_alloc;
	}

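	/* Suppress the generic allocation failure splat; on failure print a
	 * targeted hint about lowering the descriptor count instead.
	 */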
	rx_ring->cnt = dp->rxd_cnt;
	rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
	rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!rx_ring->rxds) {
		netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
			    rx_ring->cnt);
		goto err_alloc;
	}

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
		rx_ring->xsk_rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
					       GFP_KERNEL);
		if (!rx_ring->xsk_rxbufs)
			goto err_alloc;
	} else {
		rx_ring->rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
					   GFP_KERNEL);
		if (!rx_ring->rxbufs)
			goto err_alloc;
	}

	return 0;

err_alloc:
	nfp_net_rx_ring_free(rx_ring);
	return -ENOMEM;
}

int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;

	dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
			       GFP_KERNEL);
	if (!dp->rx_rings)
		return -ENOMEM;

	for (r = 0; r < dp->num_rx_rings; r++) {
		nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);

		if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
			goto err_free_prev;

		if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
			goto err_free_ring;
	}

	return 0;

err_free_prev:
	while (r--) {
		nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
err_free_ring:
		nfp_net_rx_ring_free(&dp->rx_rings[r]);
	}
	kfree(dp->rx_rings);
	return -ENOMEM;
}

void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
{
	unsigned int r;

	for (r = 0; r < dp->num_rx_rings; r++) {
		nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
		nfp_net_rx_ring_free(&dp->rx_rings[r]);
	}

	kfree(dp->rx_rings);
}

void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_rx_ring *rx_ring, unsigned int idx)
{
	/* Write the DMA address, size and MSI-X info to the device */
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
}

void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_tx_ring *tx_ring, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
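	/* If TX write-back is in use, clear the slot and program its DMA
	 * address into the device.
	 */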
	if (tx_ring->txrwb) {
		*tx_ring->txrwb = 0;
		nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx),
			  nn->dp.txrwb_dma + idx * sizeof(u64));
	}
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
}

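/* Clear the device's RX and TX ring configuration for vector @idx. */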
void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);

	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
	nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
}

netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	return nn->dp.ops->xmit(skb, netdev);
}

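/* Lockless variant of nfp_ctrl_tx(); callers are responsible for serializing
 * access to the control ring (r_vec->lock).
 */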
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];

	return nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
}

bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
	bool ret;

	spin_lock_bh(&r_vec->lock);
	ret = nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
	spin_unlock_bh(&r_vec->lock);

	return ret;
}