// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <linux/stringify.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

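/**
 * i40e_clear_rx_bi_zc - Clear the zero-copy SW ring
 * @rx_ring: Rx ring whose xdp_buff entries are zeroed
 **/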
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}

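/**
 * i40e_rx_bi - Return the address of the SW ring entry at a given index
 * @rx_ring: Rx ring
 * @idx: Index into the zero-copy SW ring
 **/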
static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}

/**
 * i40e_realloc_rx_xdp_bi - Reallocate the SW ring for either XSK or normal buffers
 * @rx_ring: Current Rx ring
 * @pool_present: true if an XSK buffer pool is present
 *
 * Allocates a SW ring sized for the requested buffer type and returns
 * -ENOMEM if the allocation fails. On success, the old SW ring is freed
 * and replaced with the new one.
 *
 * Returns 0 on success, negative on failure
 */
static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
{
	size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
					  sizeof(*rx_ring->rx_bi);
	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;

	if (pool_present) {
		kfree(rx_ring->rx_bi);
		rx_ring->rx_bi = NULL;
		rx_ring->rx_bi_zc = sw_ring;
	} else {
		kfree(rx_ring->rx_bi_zc);
		rx_ring->rx_bi_zc = NULL;
		rx_ring->rx_bi = sw_ring;
	}
	return 0;
}

/**
 * i40e_realloc_rx_bi_zc - Reallocate the Rx SW rings
 * @vsi: Current VSI
 * @zc: true if zero-copy is enabled
 *
 * Reallocates the SW ring of every Rx ring that might be used by XSK,
 * since XDP requires more memory than rx_buf provides.
 *
 * Returns 0 on success, negative on failure
 */
int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
{
	struct i40e_ring *rx_ring;
	unsigned long q;

	for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
		rx_ring = vsi->rx_rings[q];
		if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
			return -ENOMEM;
	}
	return 0;
}

/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate the buffer pool from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
		if (err)
			return err;
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_setup - Enable or disable an AF_XDP buffer pool for a ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to associate with a ring, or NULL to disassociate
 * @qid: Rx ring to (dis)associate the buffer pool with
 *
 * This function associates a buffer pool with a certain ring, or removes the
 * association when @pool is NULL.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
		i40e_xsk_pool_disable(vsi, qid);
}

/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR, EXIT}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp,
			   struct bpf_prog *xdp_prog)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return I40E_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = I40E_XDP_EXIT;
		else
			result = I40E_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		if (result == I40E_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = I40E_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
	}
	return result;
}

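/**
 * i40e_alloc_rx_buffers_zc - Allocate a number of Rx buffers from the pool
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Pulls a batch of buffers from the AF_XDP buffer pool and writes their DMA
 * addresses to the Rx descriptor ring.
 *
 * Returns true if all requested buffers were allocated, false otherwise.
 **/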
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **xdp;
	u32 nb_buffs, i;
	dma_addr_t dma;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	xdp = i40e_rx_bi(rx_ring, ntu);

	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
	if (!nb_buffs)
		return false;

	i = nb_buffs;
	while (i--) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		xdp++;
	}

	ntu += nb_buffs;
	if (ntu == rx_ring->count) {
		rx_desc = I40E_RX_DESC(rx_ring, 0);
		ntu = 0;
	}

	/* clear the status bits for the next_to_use descriptor */
	rx_desc->wb.qword1.status_error_len = 0;
	i40e_release_rx_desc(rx_ring, ntu);

	return count == nb_buffs;
}

/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		goto out;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

out:
	xsk_buff_free(xdp);
	return skb;
}

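/**
 * i40e_handle_xdp_result_zc - Act on the verdict returned for an Rx buffer
 * @rx_ring: Rx ring
 * @xdp_buff: Buffer the verdict applies to
 * @rx_desc: Rx descriptor the buffer was received on
 * @rx_packets: Set to the number of packets counted for this buffer
 * @rx_bytes: Set to the number of bytes counted for this buffer
 * @size: Packet length reported by the descriptor
 * @xdp_res: Verdict returned by i40e_run_xdp_zc()
 * @failure: Set to true if the Rx loop should stop (I40E_XDP_EXIT)
 **/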
static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
				      struct xdp_buff *xdp_buff,
				      union i40e_rx_desc *rx_desc,
				      unsigned int *rx_packets,
				      unsigned int *rx_bytes,
				      unsigned int size,
				      unsigned int xdp_res,
				      bool *failure)
{
	struct sk_buff *skb;

	*rx_packets = 1;
	*rx_bytes = size;

	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
		return;

	if (xdp_res == I40E_XDP_EXIT) {
		*failure = true;
		return;
	}

	if (xdp_res == I40E_XDP_CONSUMED) {
		xsk_buff_free(xdp_buff);
		return;
	}
	if (xdp_res == I40E_XDP_PASS) {
		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
		 * SBP is *not* set in PRT_SBPVSI (default not set).
		 */
		skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		if (eth_skb_pad(skb)) {
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		*rx_bytes = skb->len;
		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
		return;
	}

	/* Should never get here, as all valid cases have been handled already.
	 */
	WARN_ON_ONCE(1);
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 next_to_clean = rx_ring->next_to_clean;
	u16 count_mask = rx_ring->count - 1;
	unsigned int xdp_res, xdp_xmit = 0;
	struct bpf_prog *xdp_prog;
	bool failure = false;
	u16 cleaned_count;

	/* NB! xdp_prog will always be !NULL, because this path is only
	 * enabled by attaching an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		unsigned int rx_packets;
		unsigned int rx_bytes;
		struct xdp_buff *bi;
		unsigned int size;
		u64 qword;

		rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = *i40e_rx_bi(rx_ring, next_to_clean);
			xsk_buff_free(bi);
			next_to_clean = (next_to_clean + 1) & count_mask;
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = *i40e_rx_bi(rx_ring, next_to_clean);
		xsk_buff_set_size(bi, size);
		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);

		xdp_res = i40e_run_xdp_zc(rx_ring, bi, xdp_prog);
		i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
					  &rx_bytes, size, xdp_res, &failure);
		if (failure)
			break;
		total_rx_packets += rx_packets;
		total_rx_bytes += rx_bytes;
		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
		next_to_clean = (next_to_clean + 1) & count_mask;
	}

	rx_ring->next_to_clean = next_to_clean;
	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;

	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
		failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

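/**
 * i40e_xmit_pkt - Place one AF_XDP descriptor on the XDP Tx ring
 * @xdp_ring: XDP Tx ring
 * @desc: AF_XDP Tx descriptor to transmit
 * @total_bytes: Running byte counter, incremented by the descriptor length
 **/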
static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
			  unsigned int *total_bytes)
{
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
						  0, desc->len, 0);

	*total_bytes += desc->len;
}

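/**
 * i40e_xmit_pkt_batch - Place PKTS_PER_BATCH AF_XDP descriptors on the XDP Tx ring
 * @xdp_ring: XDP Tx ring
 * @desc: Array of at least PKTS_PER_BATCH AF_XDP Tx descriptors
 * @total_bytes: Running byte counter, incremented by the descriptor lengths
 **/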
static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
				unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);

		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
							  I40E_TX_DESC_CMD_EOP,
							  0, desc[i].len, 0);

		*total_bytes += desc[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

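/**
 * i40e_fill_tx_hw_ring - Write a batch of AF_XDP descriptors to the HW ring
 * @xdp_ring: XDP Tx ring
 * @descs: AF_XDP Tx descriptors to transmit
 * @nb_pkts: Number of descriptors in @descs
 * @total_bytes: Running byte counter for the transmitted frames
 *
 * Transmits full PKTS_PER_BATCH batches first, then the remainder one
 * descriptor at a time.
 **/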
static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
				 unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (i = batched; i < batched + leftover; i++)
		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

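/**
 * i40e_set_rs_bit - Set the RS bit on the last filled Tx descriptor
 * @xdp_ring: XDP Tx ring
 *
 * Requests a descriptor write-back for the most recently filled descriptor
 * so that completed frames can be cleaned later.
 **/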
static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
{
	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct i40e_tx_desc *tx_desc;

	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
	tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
}

/**
 * i40e_xmit_zc - Performs zero-copy AF_XDP Tx
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
	if (!nb_pkts)
		return true;

	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			     &total_bytes);

	/* Request an interrupt for the last frame and bump tail ptr. */
	i40e_set_rs_bit(xdp_ring);
	i40e_xdp_ring_update_tail(xdp_ring);

	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);

	return nb_pkts < budget;
}

/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	tx_ring->xdp_tx_active--;
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	u32 i, completed_frames, xsk_frames = 0;
	u32 head_idx = i40e_get_head(tx_ring);
	struct i40e_tx_buffer *tx_bi;
	unsigned int ntc;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	completed_frames = head_idx - tx_ring->next_to_clean;

	if (completed_frames == 0)
		goto out_xmit;

	if (likely(!tx_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf) {
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
			tx_bi->xdpf = NULL;
		} else {
			xsk_frames++;
		}

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

skip:
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}

/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -EINVAL;

	if (queue_id >= vsi->num_queue_pairs)
		return -EINVAL;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -EINVAL;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}

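/**
 * i40e_xsk_clean_rx_ring - Clean the zero-copy Rx ring on shutdown
 * @rx_ring: Rx ring to clean
 *
 * Returns all outstanding Rx buffers between next_to_clean and next_to_use
 * to the AF_XDP buffer pool.
 **/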
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 count_mask = rx_ring->count - 1;
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);

		xsk_buff_free(rx_bi);
	}
}

/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
 * buffer pool attached
 * @vsi: Current VSI
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

	return false;
}