// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

#include <linux/if_vlan.h>
#include <net/xdp_sock_drv.h>

#include "igc.h"
#include "igc_xdp.h"

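/* Install or remove the XDP program for the device. The interface is
 * brought down and back up around the swap so that, on reopen, the rings
 * are configured to match the new program.
 */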
int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
		     struct netlink_ext_ack *extack)
{
	struct net_device *dev = adapter->netdev;
	bool if_running = netif_running(dev);
	struct bpf_prog *old_prog;

	if (dev->mtu > ETH_DATA_LEN) {
		/* For now, the driver doesn't support XDP functionality with
		 * jumbo frames, so we return an error.
		 */
		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
		return -EOPNOTSUPP;
	}

	if (if_running)
		igc_close(dev);

	old_prog = xchg(&adapter->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (if_running)
		igc_open(dev);

	return 0;
}

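/* Enable an AF_XDP buffer pool in zero-copy mode on the given queue pair:
 * DMA-map the pool, mark the Rx/Tx rings with IGC_RING_FLAG_AF_XDP_ZC and,
 * if the interface is already running with XDP, restart the rings so they
 * start using the pool.
 */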
static int igc_xdp_enable_pool(struct igc_adapter *adapter,
			       struct xsk_buff_pool *pool, u16 queue_id)
{
	struct net_device *ndev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	struct igc_ring *rx_ring, *tx_ring;
	struct napi_struct *napi;
	bool needs_reset;
	u32 frame_size;
	int err;

	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

	frame_size = xsk_pool_get_rx_frame_size(pool);
	if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
		/* When XDP is enabled, the driver doesn't support frames that
		 * span over multiple buffers. To avoid that, we check that the
		 * xsk frame size is big enough to fit a max-size ethernet
		 * frame with double VLAN tagging.
		 */
		return -EOPNOTSUPP;
	}

	err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
	if (err) {
		netdev_err(ndev, "Failed to map xsk pool\n");
		return err;
	}

	/* Only restart the rings when the interface is already up with XDP
	 * enabled; in all other cases the flag is picked up when the rings
	 * are next configured.
	 */
	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

	rx_ring = adapter->rx_ring[queue_id];
	tx_ring = adapter->tx_ring[queue_id];
	/* Rx and Tx rings share the same napi context. */
	napi = &rx_ring->q_vector->napi;

	if (needs_reset) {
		igc_disable_rx_ring(rx_ring);
		igc_disable_tx_ring(tx_ring);
		napi_disable(napi);
	}

	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);

	if (needs_reset) {
		napi_enable(napi);
		igc_enable_rx_ring(rx_ring);
		igc_enable_tx_ring(tx_ring);

		/* Trigger NAPI on this queue so the Rx ring is refilled with
		 * buffers from the new pool.
		 */
		err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
		if (err) {
			xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}

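/* Disable the AF_XDP buffer pool on the given queue pair: unmap the pool's
 * DMA mappings, clear the ring flags and, if needed, restart the rings so
 * they return to the regular Rx/Tx buffer handling.
 */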
static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
{
	struct igc_ring *rx_ring, *tx_ring;
	struct xsk_buff_pool *pool;
	struct napi_struct *napi;
	bool needs_reset;

	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

	pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
	if (!pool)
		return -EINVAL;

	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);

	rx_ring = adapter->rx_ring[queue_id];
	tx_ring = adapter->tx_ring[queue_id];
	/* Rx and Tx rings share the same napi context. */
	napi = &rx_ring->q_vector->napi;

	if (needs_reset) {
		igc_disable_rx_ring(rx_ring);
		igc_disable_tx_ring(tx_ring);
		napi_disable(napi);
	}

	xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);

	if (needs_reset) {
		napi_enable(napi);
		igc_enable_rx_ring(rx_ring);
		igc_enable_tx_ring(tx_ring);
	}

	return 0;
}

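/* Called from the driver's ndo_bpf handler for XDP_SETUP_XSK_POOL: a
 * non-NULL pool enables zero-copy mode on the queue, a NULL pool disables
 * it.
 */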
int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
		       u16 queue_id)
{
	return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) :
		      igc_xdp_disable_pool(adapter, queue_id);
}