// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */

#include <linux/seq_file.h>

#include "../nfp_net.h"
#include "../nfp_net_dp.h"
#include "../nfp_net_xsk.h"
#include "nfd3.h"

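/* Drain AF_XDP TX buffers left between the read and write pointers.
 * rd_p and wr_p are free-running counters; D_IDX() masks them down to a
 * slot index (ring sizes are powers of two), and each completed slot is
 * handed back to the XSK pool.
 */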
static void nfp_nfd3_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_nfd3_tx_buf *txbuf;
	unsigned int idx;

	while (tx_ring->rd_p != tx_ring->wr_p) {
		idx = D_IDX(tx_ring, tx_ring->rd_p);
		txbuf = &tx_ring->txbufs[idx];

		txbuf->real_len = 0;

		tx_ring->qcp_rd_p++;
		tx_ring->rd_p++;

		if (tx_ring->r_vec->xsk_pool) {
			if (txbuf->is_xsk_tx)
				nfp_nfd3_xsk_tx_free(txbuf);

			xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1);
		}
	}
}

/**
 * nfp_nfd3_tx_ring_reset() - Free any untransmitted buffers and reset pointers
 * @dp:      NFP Net data path struct
 * @tx_ring: TX ring structure
 *
 * Assumes that the device is stopped; must be idempotent.
 */
static void
nfp_nfd3_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	struct netdev_queue *nd_q;
	const skb_frag_t *frag;

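	/* For SKB rings, walk the unprocessed slots and undo the DMA
	 * mapping for the packet head and each gather fragment; the skb
	 * is freed at the buffer holding its last fragment. XDP/AF_XDP
	 * rings are drained separately below.
	 */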
	while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
		struct nfp_nfd3_tx_buf *tx_buf;
		struct sk_buff *skb;
		int idx, nr_frags;

		idx = D_IDX(tx_ring, tx_ring->rd_p);
		tx_buf = &tx_ring->txbufs[idx];

		skb = tx_ring->txbufs[idx].skb;
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (tx_buf->fidx == -1) {
			/* unmap head */
			dma_unmap_single(dp->dev, tx_buf->dma_addr,
					 skb_headlen(skb), DMA_TO_DEVICE);
		} else {
			/* unmap fragment */
			frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
			dma_unmap_page(dp->dev, tx_buf->dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}

		/* check for last gather fragment */
		if (tx_buf->fidx == nr_frags - 1)
			dev_kfree_skb_any(skb);

		tx_buf->dma_addr = 0;
		tx_buf->skb = NULL;
		tx_buf->fidx = -2;

		tx_ring->qcp_rd_p++;
		tx_ring->rd_p++;
	}

	if (tx_ring->is_xdp)
		nfp_nfd3_xsk_tx_bufs_free(tx_ring);

	memset(tx_ring->txds, 0, tx_ring->size);
	tx_ring->wr_p = 0;
	tx_ring->rd_p = 0;
	tx_ring->qcp_rd_p = 0;
	tx_ring->wr_ptr_add = 0;

	if (tx_ring->is_xdp || !dp->netdev)
		return;

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
	netdev_tx_reset_queue(nd_q);
}

/**
 * nfp_nfd3_tx_ring_free() - Free resources allocated to a TX ring
 * @tx_ring: TX ring to free
 */
static void nfp_nfd3_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

	kvfree(tx_ring->txbufs);

	if (tx_ring->txds)
		dma_free_coherent(dp->dev, tx_ring->size,
				  tx_ring->txds, tx_ring->dma);

	tx_ring->cnt = 0;
	tx_ring->txbufs = NULL;
	tx_ring->txds = NULL;
	tx_ring->dma = 0;
	tx_ring->size = 0;
}

/**
 * nfp_nfd3_tx_ring_alloc() - Allocate resources for a TX ring
 * @dp:      NFP Net data path struct
 * @tx_ring: TX ring structure to allocate
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_nfd3_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;

	tx_ring->cnt = dp->txd_cnt;

	tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
	tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!tx_ring->txds) {
		netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
			    tx_ring->cnt);
		goto err_alloc;
	}

	tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
				   GFP_KERNEL);
	if (!tx_ring->txbufs)
		goto err_alloc;

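	/* Steer transmissions from the CPUs in this vector's affinity
	 * mask to this queue (XPS); XDP rings and netdev-less control
	 * vNICs take no part in XPS.
	 */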
	if (!tx_ring->is_xdp && dp->netdev)
		netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
				    tx_ring->idx);

	return 0;

err_alloc:
	nfp_nfd3_tx_ring_free(tx_ring);
	return -ENOMEM;
}

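/* Only XDP TX rings own their buffers: each slot is pre-filled with an
 * RX-style page fragment so XDP_TX can transmit straight out of RX
 * buffers. SKB rings reference caller-owned skbs, so there is nothing
 * to free for them here.
 */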
static void
nfp_nfd3_tx_ring_bufs_free(struct nfp_net_dp *dp,
			   struct nfp_net_tx_ring *tx_ring)
{
	unsigned int i;

	if (!tx_ring->is_xdp)
		return;

	for (i = 0; i < tx_ring->cnt; i++) {
		if (!tx_ring->txbufs[i].frag)
			return;

		nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
		__free_page(virt_to_page(tx_ring->txbufs[i].frag));
	}
}

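/* Pre-fill an XDP TX ring with DMA-mapped page fragments from the RX
 * buffer allocator. On failure the partially-filled ring is unwound;
 * the NULL-frag check in nfp_nfd3_tx_ring_bufs_free() stops the unwind
 * at the first slot that was never filled.
 */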
static int
nfp_nfd3_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
			    struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_nfd3_tx_buf *txbufs = tx_ring->txbufs;
	unsigned int i;

	if (!tx_ring->is_xdp)
		return 0;

	for (i = 0; i < tx_ring->cnt; i++) {
		txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
		if (!txbufs[i].frag) {
			nfp_nfd3_tx_ring_bufs_free(dp, tx_ring);
			return -ENOMEM;
		}
	}

	return 0;
}

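/* debugfs helper: dump each TX descriptor's four raw 32-bit words along
 * with the attached skb/xdp buffer, and mark the host (H_*) and device
 * (D_*) read/write pointer positions within the ring.
 */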
static void
nfp_nfd3_print_tx_descs(struct seq_file *file,
			struct nfp_net_r_vector *r_vec,
			struct nfp_net_tx_ring *tx_ring,
			u32 d_rd_p, u32 d_wr_p)
{
	struct nfp_nfd3_tx_desc *txd;
	u32 txd_cnt = tx_ring->cnt;
	int i;

	for (i = 0; i < txd_cnt; i++) {
		struct xdp_buff *xdp;
		struct sk_buff *skb;

		txd = &tx_ring->txds[i];
		seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
			   txd->vals[0], txd->vals[1],
			   txd->vals[2], txd->vals[3]);

		if (!tx_ring->is_xdp) {
			skb = READ_ONCE(tx_ring->txbufs[i].skb);
			if (skb)
				seq_printf(file, " skb->head=%p skb->data=%p",
					   skb->head, skb->data);
		} else {
			xdp = READ_ONCE(tx_ring->txbufs[i].xdp);
			if (xdp)
				seq_printf(file, " xdp->data=%p", xdp->data);
		}

		if (tx_ring->txbufs[i].dma_addr)
			seq_printf(file, " dma_addr=%pad",
				   &tx_ring->txbufs[i].dma_addr);

		if (i == tx_ring->rd_p % txd_cnt)
			seq_puts(file, " H_RD");
		if (i == tx_ring->wr_p % txd_cnt)
			seq_puts(file, " H_WR");
		if (i == d_rd_p % txd_cnt)
			seq_puts(file, " D_RD");
		if (i == d_wr_p % txd_cnt)
			seq_puts(file, " D_WR");

		seq_putc(file, '\n');
	}
}

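/* Control-word features the NFD3 datapath implements; advertised to the
 * core as the datapath capability mask below.
 */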
#define NFP_NFD3_CFG_CTRL_SUPPORTED					\
	(NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_CTRL_PROMISC |		\
	 NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC |		\
	 NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM |		\
	 NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_TXVLAN |		\
	 NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO |		\
	 NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA |	\
	 NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_RSS |		\
	 NFP_NET_CFG_CTRL_IRQMOD | NFP_NET_CFG_CTRL_TXRWB |		\
	 NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE |		\
	 NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 |			\
	 NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE |	\
	 NFP_NET_CFG_CTRL_LIVE_ADDR)

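/* Hook the NFD3 descriptor format into the generic nfp_net datapath. */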
const struct nfp_dp_ops nfp_nfd3_ops = {
	.version		= NFP_NFD_VER_NFD3,
	.tx_min_desc_per_pkt	= 1,
	.cap_mask		= NFP_NFD3_CFG_CTRL_SUPPORTED,
	.poll			= nfp_nfd3_poll,
	.xsk_poll		= nfp_nfd3_xsk_poll,
	.ctrl_poll		= nfp_nfd3_ctrl_poll,
	.xmit			= nfp_nfd3_tx,
	.ctrl_tx_one		= nfp_nfd3_ctrl_tx_one,
	.rx_ring_fill_freelist	= nfp_nfd3_rx_ring_fill_freelist,
	.tx_ring_alloc		= nfp_nfd3_tx_ring_alloc,
	.tx_ring_reset		= nfp_nfd3_tx_ring_reset,
	.tx_ring_free		= nfp_nfd3_tx_ring_free,
	.tx_ring_bufs_alloc	= nfp_nfd3_tx_ring_bufs_alloc,
	.tx_ring_bufs_free	= nfp_nfd3_tx_ring_bufs_free,
	.print_tx_descs		= nfp_nfd3_print_tx_descs
};