// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"

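/**
 * ixgbe_xsk_pool - retrieve the AF_XDP buffer pool attached to a ring
 * @adapter: board private structure
 * @ring: Rx or Tx ring to look up
 *
 * Returns the xsk_buff_pool registered for the ring's queue id, or NULL if
 * no XDP program is loaded or zero-copy is not enabled on that queue.
 */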
struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(adapter->netdev, qid);
}

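/**
 * ixgbe_xsk_pool_enable - enable zero-copy AF_XDP on a queue pair
 * @adapter: board private structure
 * @pool: buffer pool supplied by the AF_XDP socket
 * @qid: Rx/Tx queue pair index to attach the pool to
 *
 * Returns 0 on success, negative errno on failure.
 */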
static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
				 struct xsk_buff_pool *pool,
				 u16 qid)
{
	struct net_device *netdev = adapter->netdev;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
	if (err)
		return err;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	set_bit(qid, adapter->af_xdp_zc_qps);

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);

		/* Kick start the NAPI context so that receiving will start */
		err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
		if (err) {
			clear_bit(qid, adapter->af_xdp_zc_qps);
			xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}

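/**
 * ixgbe_xsk_pool_disable - disable zero-copy AF_XDP on a queue pair
 * @adapter: board private structure
 * @qid: Rx/Tx queue pair index to detach the pool from
 *
 * Returns 0 on success, -EINVAL if no pool is attached to @qid.
 */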
static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
{
	struct xsk_buff_pool *pool;
	bool if_running;

	pool = xsk_get_pool_from_qid(adapter->netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	clear_bit(qid, adapter->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);

	if (if_running)
		ixgbe_txrx_ring_enable(adapter, qid);

	return 0;
}

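/**
 * ixgbe_xsk_pool_setup - enable or disable an AF_XDP buffer pool
 * @adapter: board private structure
 * @pool: buffer pool to attach, or NULL to detach the current one
 * @qid: Rx/Tx queue pair index
 */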
int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
			 struct xsk_buff_pool *pool,
			 u16 qid)
{
	return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
		ixgbe_xsk_pool_disable(adapter, qid);
}

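/**
 * ixgbe_run_xdp_zc - run the XDP program on a zero-copy Rx buffer
 * @adapter: board private structure
 * @rx_ring: Rx ring the buffer was received on
 * @xdp: zero-copy buffer to pass to the program
 *
 * Returns one of IXGBE_XDP_{PASS,CONSUMED,TX,REDIR,EXIT} describing how the
 * buffer was handled.
 */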
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ixgbe_ring *ring;
	struct xdp_frame *xdpf;
	u32 act;

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return IXGBE_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = IXGBE_XDP_EXIT;
		else
			result = IXGBE_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto out_failure;
		ring = ixgbe_determine_xdp_ring(adapter);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_lock(&ring->tx_lock);
		result = ixgbe_xmit_xdp_ring(ring, xdpf);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_unlock(&ring->tx_lock);
		if (result == IXGBE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = IXGBE_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
	}
	return result;
}

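/**
 * ixgbe_alloc_rx_buffers_zc - refill Rx descriptors from the xsk buffer pool
 * @rx_ring: Rx ring to refill
 * @count: number of descriptors to (re)populate
 *
 * Returns true if all @count buffers were allocated, false if the pool ran
 * out of buffers before the ring was fully refilled.
 */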
bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	dma_addr_t dma;
	bool ok = true;

	/* nothing to do */
	if (!count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!bi->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(bi->xdp);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		count--;
	} while (count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}

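/**
 * ixgbe_construct_skb_zc - copy an XDP_PASS frame out of the xsk buffer into
 *			    a freshly allocated skb
 * @rx_ring: Rx ring the buffer was received on
 * @xdp: zero-copy buffer holding the received frame and metadata
 *
 * Returns the new skb, or NULL on allocation failure.
 */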
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      const struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	return skb;
}

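/* Bump next_to_clean, wrapping at the end of the ring, and prefetch the
 * descriptor that will be cleaned next.
 */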
static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}

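/**
 * ixgbe_clean_rx_irq_zc - clean the zero-copy Rx ring within the NAPI budget
 * @q_vector: q_vector the ring belongs to
 * @rx_ring: zero-copy Rx ring to clean
 * @budget: NAPI budget, i.e. the maximum number of packets to process
 *
 * Returns the number of packets processed; when buffer allocation failed and
 * the socket does not use need_wakeup, the full budget is returned instead so
 * that NAPI keeps polling.
 */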
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_zc(rx_ring,
							     cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			ixgbe_inc_ntc(rx_ring);
			next_bi =
				&rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->discard = true;
			continue;
		}

		if (unlikely(bi->discard)) {
			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			bi->discard = false;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		bi->xdp->data_end = bi->xdp->data + size;
		xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);

		if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
			xdp_xmit |= xdp_res;
		} else if (xdp_res == IXGBE_XDP_EXIT) {
			failure = true;
			break;
		} else if (xdp_res == IXGBE_XDP_CONSUMED) {
			xsk_buff_free(bi->xdp);
		} else if (xdp_res == IXGBE_XDP_PASS) {
			goto construct_skb;
		}

		bi->xdp = NULL;
		total_rx_packets++;
		total_rx_bytes += size;

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);
		continue;

construct_skb:
		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);

		ixgbe_xdp_ring_update_tail_locked(ring);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

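/**
 * ixgbe_xsk_clean_rx_ring - return all outstanding Rx buffers to the pool
 * @rx_ring: zero-copy Rx ring being torn down
 */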
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	struct ixgbe_rx_buffer *bi;
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		bi = &rx_ring->rx_buffer_info[i];

		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}

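/**
 * ixgbe_xmit_zc - pull frames from the XSK Tx queue and post them to hardware
 * @xdp_ring: XDP Tx ring used for zero-copy transmission
 * @budget: maximum number of descriptors to post in one call
 *
 * Returns true when Tx work is complete for this invocation, false when the
 * caller should keep polling (e.g. the HW ring ran out of free descriptors).
 */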
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;
	u32 cmd_type;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
			work_done = false;
			break;
		}

		if (!netif_carrier_ok(xdp_ring->netdev))
			break;

		if (!xsk_tx_peek_desc(pool, &desc))
			break;

		dma = xsk_buff_raw_get_dma(pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;
		tx_bi->xdpf = NULL;
		tx_bi->gso_segs = 1;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= desc.len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_tx_release(pool);
	}

	return !!budget && work_done;
}

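/* Release a Tx buffer that carried an XDP_TX frame: return the frame to its
 * originator and undo the DMA mapping set up at transmit time.
 */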
static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

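/**
 * ixgbe_clean_xdp_tx_irq - complete finished descriptors on the XDP Tx ring
 * @q_vector: q_vector the ring belongs to
 * @tx_ring: XDP Tx ring to clean
 * @napi_budget: NAPI budget (not used for Tx cleaning)
 *
 * Completes descriptors whose DD bit is set, accounting XDP_TX frames and
 * AF_XDP frames separately, then tries to transmit more zero-copy frames.
 * Returns the result of ixgbe_xmit_zc().
 */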
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	unsigned int total_packets = 0, total_bytes = 0;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	tx_bi = &tx_ring->tx_buffer_info[ntc];
	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);

	while (ntc != ntu) {
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		ntc++;
		if (unlikely(ntc == tx_ring->count)) {
			ntc = 0;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);
	}

	tx_ring->next_to_clean = ntc;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);

	if (xsk_uses_need_wakeup(pool))
		xsk_set_tx_need_wakeup(pool);

	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}

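/**
 * ixgbe_xsk_wakeup - wake up the queue's NAPI context on behalf of AF_XDP
 * @dev: net_device the AF_XDP socket is bound to
 * @qid: queue id the wakeup is requested for
 * @flags: XDP_WAKEUP_RX and/or XDP_WAKEUP_TX (not inspected here)
 *
 * If the queue's NAPI context is not already scheduled, re-arm the
 * corresponding interrupt so that it runs and drives Rx/Tx processing.
 */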
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -EINVAL;

	if (qid >= adapter->num_xdp_queues)
		return -EINVAL;

	ring = adapter->xdp_ring[qid];

	if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
		return -ENETDOWN;

	if (!ring->xsk_pool)
		return -EINVAL;

	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}

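/**
 * ixgbe_xsk_clean_tx_ring - free all pending buffers on an XDP Tx ring
 * @tx_ring: XDP Tx ring being torn down
 *
 * Walks the unprocessed descriptors, releases XDP_TX frames and reports the
 * remaining AF_XDP frames as completed to the pool.
 */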
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_buffer_info[ntc];

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc == tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);
}