// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021, Intel Corporation. */

#include <net/xdp_sock_drv.h>

#include "stmmac.h"
#include "stmmac_xdp.h"

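/* Attach an XSK buffer pool to @queue and switch it to AF_XDP zero-copy
 * mode. Reached through stmmac_xdp_setup_pool() below, typically from the
 * driver's ndo_bpf XDP_SETUP_XSK_POOL handling.
 */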
static int stmmac_xdp_enable_pool(struct stmmac_priv *priv,
				  struct xsk_buff_pool *pool, u16 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	bool need_update;
	u32 frame_size;
	int err;

	if (queue >= priv->plat->rx_queues_to_use ||
	    queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;

	frame_size = xsk_pool_get_rx_frame_size(pool);
	/* XDP ZC does not span multiple frames, so make sure the XSK pool
	 * buffer size can hold at least a Q-in-Q frame.
	 */
	if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2)
		return -EOPNOTSUPP;

	err = xsk_pool_dma_map(pool, priv->device, STMMAC_RX_DMA_ATTR);
	if (err) {
		netdev_err(priv->dev, "Failed to map xsk pool\n");
		return err;
	}

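	/* Only quiesce and restart the queue if the interface is running with
	 * XDP enabled; otherwise the pool is simply recorded for later use.
	 */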
	need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);

	if (need_update) {
		napi_disable(&ch->rx_napi);
		napi_disable(&ch->tx_napi);
		stmmac_disable_rx_queue(priv, queue);
		stmmac_disable_tx_queue(priv, queue);
	}

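	/* Mark the queue as AF_XDP zero-copy so that re-enabling the queue
	 * can pick up the XSK buffer pool.
	 */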
	set_bit(queue, priv->af_xdp_zc_qps);

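	/* Restart the queue on the combined rx/tx NAPI context used for
	 * zero-copy and kick RX so the ring can be refilled from the pool.
	 */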
	if (need_update) {
		stmmac_enable_rx_queue(priv, queue);
		stmmac_enable_tx_queue(priv, queue);
		napi_enable(&ch->rxtx_napi);

		err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

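/* Detach the XSK buffer pool from @queue and return it to regular buffer
 * handling. Counterpart of stmmac_xdp_enable_pool().
 */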
static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	struct xsk_buff_pool *pool;
	bool need_update;

	if (queue >= priv->plat->rx_queues_to_use ||
	    queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;

	pool = xsk_get_pool_from_qid(priv->dev, queue);
	if (!pool)
		return -EINVAL;

	need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);

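	/* Quiesce the queue and wait for outstanding RCU read-side users of
	 * the datapath before the pool's DMA mappings are torn down.
	 */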
	if (need_update) {
		napi_disable(&ch->rxtx_napi);
		stmmac_disable_rx_queue(priv, queue);
		stmmac_disable_tx_queue(priv, queue);
		synchronize_rcu();
	}

	xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR);

	clear_bit(queue, priv->af_xdp_zc_qps);

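	/* Bring the queue back up on the regular rx/tx NAPI contexts now that
	 * zero-copy mode is off for this queue.
	 */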
	if (need_update) {
		stmmac_enable_rx_queue(priv, queue);
		stmmac_enable_tx_queue(priv, queue);
		napi_enable(&ch->rx_napi);
		napi_enable(&ch->tx_napi);
	}

	return 0;
}

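/* Dispatch XSK pool setup: a non-NULL @pool enables zero-copy on @queue,
 * a NULL @pool disables it.
 */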
int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
			  u16 queue)
{
	return pool ? stmmac_xdp_enable_pool(priv, pool, queue) :
		      stmmac_xdp_disable_pool(priv, queue);
}

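/* Attach or detach an XDP program. Typically invoked from the driver's
 * ndo_bpf handler for the XDP_SETUP_PROG command.
 */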
int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct net_device *dev = priv->dev;
	struct bpf_prog *old_prog;
	bool need_update;
	bool if_running;

	if_running = netif_running(dev);

	if (prog && dev->mtu > ETH_DATA_LEN) {
		/* For now, the driver doesn't support XDP functionality with
		 * jumbo frames, so return an error.
		 */
		NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
		return -EOPNOTSUPP;
	}

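	/* Removing the program means the device can no longer act as an
	 * XDP_REDIRECT target, so drop that feature flag up front.
	 */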
	if (!prog)
		xdp_features_clear_redirect_target(dev);

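	/* Switching between "no program" and "program attached" requires the
	 * datapath to be released and re-opened, see stmmac_xdp_release() and
	 * stmmac_xdp_open().
	 */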
	need_update = !!priv->xdp_prog != !!prog;
	if (if_running && need_update)
		stmmac_xdp_release(dev);

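	/* Swap in the new program atomically and release the reference held
	 * on the old one, if any.
	 */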
	old_prog = xchg(&priv->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	/* Disable RX split header (SPH) for XDP operation */
	priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv);

	if (if_running && need_update)
		stmmac_xdp_open(dev);

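	/* With a program attached, advertise the device as an XDP_REDIRECT
	 * target (the "false" indicates no SG support).
	 */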
	if (prog)
		xdp_features_set_redirect_target(dev, false);

	return 0;
}