// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/tso.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"

static void otx2_nix_rq_op_stats(struct queue_stats *stats,
				 struct otx2_nic *pfvf, int qidx)
{
	u64 incr = (u64)qidx << 32;
	u64 *ptr;

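	/* As inferred from usage here and in otx2_sqb_flush(): the
	 * NIX_LF_*_OP_* registers implement an indexed atomic read.
	 * The queue index is encoded in bits <63:32> of the operand
	 * and the atomic add returns the selected queue's counter.
	 */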
	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
	stats->bytes = otx2_atomic64_add(incr, ptr);

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
	stats->pkts = otx2_atomic64_add(incr, ptr);
}

static void otx2_nix_sq_op_stats(struct queue_stats *stats,
				 struct otx2_nic *pfvf, int qidx)
{
	u64 incr = (u64)qidx << 32;
	u64 *ptr;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
	stats->bytes = otx2_atomic64_add(incr, ptr);

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
	stats->pkts = otx2_atomic64_add(incr, ptr);
}

void otx2_update_lmac_stats(struct otx2_nic *pfvf)
{
	struct msg_req *req;

	if (!netif_running(pfvf->netdev))
		return;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return;
	}

	otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
}

void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf)
{
	struct msg_req *req;

	if (!netif_running(pfvf->netdev))
		return;
	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox);
	if (req)
		otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
}

int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];

	if (!pfvf->qset.rq)
		return 0;

	otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
	return 1;
}

int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx];

	if (!pfvf->qset.sq)
		return 0;

	otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
	return 1;
}

void otx2_get_dev_stats(struct otx2_nic *pfvf)
{
	struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;

	dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
	dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP);
	dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST);
	dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST);
	dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST);
	dev_stats->rx_frames = dev_stats->rx_bcast_frames +
			       dev_stats->rx_mcast_frames +
			       dev_stats->rx_ucast_frames;

	dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
	dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
	dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
	dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
	dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
	dev_stats->tx_frames = dev_stats->tx_bcast_frames +
			       dev_stats->tx_mcast_frames +
			       dev_stats->tx_ucast_frames;
}

void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_dev_stats *dev_stats;

	otx2_get_dev_stats(pfvf);

	dev_stats = &pfvf->hw.dev_stats;
	stats->rx_bytes = dev_stats->rx_bytes;
	stats->rx_packets = dev_stats->rx_frames;
	stats->rx_dropped = dev_stats->rx_drops;
	stats->multicast = dev_stats->rx_mcast_frames;

	stats->tx_bytes = dev_stats->tx_bytes;
	stats->tx_packets = dev_stats->tx_frames;
	stats->tx_dropped = dev_stats->tx_drops;
}
EXPORT_SYMBOL(otx2_get_stats64);

/* Sync MAC address with RVU AF */
static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
{
	struct nix_set_mac_addr *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	ether_addr_copy(req->mac_addr, mac);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
				struct net_device *netdev)
{
	struct nix_get_mac_addr_rsp *rsp;
	struct mbox_msghdr *msghdr;
	struct msg_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(msghdr)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(msghdr);
	}
	rsp = (struct nix_get_mac_addr_rsp *)msghdr;
	eth_hw_addr_set(netdev, rsp->mac_addr);
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

int otx2_set_mac_address(struct net_device *netdev, void *p)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
		eth_hw_addr_set(netdev, addr->sa_data);
		/* update dmac field in vlan offload rule */
		if (netif_running(netdev) &&
		    pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
			otx2_install_rxvlan_offload_flow(pfvf);
		/* update dmac address in ntuple and DMAC filter list */
		if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
			otx2_dmacflt_update_pfmac_flow(pfvf);
	} else {
		return -EPERM;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_set_mac_address);

int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
	struct nix_frs_cfg *req;
	u16 maxlen;
	int err;

	maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;

	/* Use max receive length supported by hardware for loopback devices */
	if (is_otx2_lbkvf(pfvf->pdev))
		req->maxlen = maxlen;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_config_pause_frm(struct otx2_nic *pfvf)
{
	struct cgx_pause_frm_cfg *req;
	int err;

	if (is_otx2_lbkvf(pfvf->pdev))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto unlock;
	}

	req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED);
	req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED);
	req->set = 1;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
unlock:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
EXPORT_SYMBOL(otx2_config_pause_frm);

int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	struct nix_rss_flowkey_cfg_rsp *rsp;
	struct nix_rss_flowkey_cfg *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	req->mcam_index = -1; /* Default or reserved index */
	req->flowkey_cfg = rss->flowkey_cfg;
	req->group = DEFAULT_RSS_CONTEXT_GROUP;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	rsp = (struct nix_rss_flowkey_cfg_rsp *)
	       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		err = PTR_ERR(rsp);
		goto fail;
	}

	pfvf->hw.flowkey_alg_idx = rsp->alg_idx;
fail:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	const int index = rss->rss_size * ctx_id;
	struct mbox *mbox = &pfvf->mbox;
	struct otx2_rss_ctx *rss_ctx;
	struct nix_aq_enq_req *aq;
	int idx, err;

	mutex_lock(&mbox->lock);
	rss_ctx = rss->rss_ctx[ctx_id];
	/* Get memory to put this msg */
	for (idx = 0; idx < rss->rss_size; idx++) {
		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			/* The shared memory buffer can be full.
			 * Flush it and retry
			 */
			err = otx2_sync_mbox_msg(mbox);
			if (err) {
				mutex_unlock(&mbox->lock);
				return err;
			}
			aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
			if (!aq) {
				mutex_unlock(&mbox->lock);
				return -ENOMEM;
			}
		}

		aq->rss.rq = rss_ctx->ind_tbl[idx];

		/* Fill AQ info */
		aq->qidx = index + idx;
		aq->ctype = NIX_AQ_CTYPE_RSS;
		aq->op = NIX_AQ_INSTOP_INIT;
	}
	err = otx2_sync_mbox_msg(mbox);
	mutex_unlock(&mbox->lock);
	return err;
}

void otx2_set_rss_key(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	u64 *key = (u64 *)&rss->key[4];
	int idx;

	/* The 352-bit (44-byte) key needs to be configured as below:
	 * NIX_LF_RX_SECRETX0 = key<351:288>
	 * NIX_LF_RX_SECRETX1 = key<287:224>
	 * NIX_LF_RX_SECRETX2 = key<223:160>
	 * NIX_LF_RX_SECRETX3 = key<159:96>
	 * NIX_LF_RX_SECRETX4 = key<95:32>
	 * NIX_LF_RX_SECRETX5<63:32> = key<31:0>
	 */
	otx2_write64(pfvf, NIX_LF_RX_SECRETX(5),
		     (u64)(*((u32 *)&rss->key)) << 32);
	idx = sizeof(rss->key) / sizeof(u64);
	while (idx > 0) {
		idx--;
		otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++);
	}
}

int otx2_rss_init(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	struct otx2_rss_ctx *rss_ctx;
	int idx, ret = 0;

	rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);

	/* Init RSS key if it is not set up already */
	if (!rss->enable)
		netdev_rss_key_fill(rss->key, sizeof(rss->key));
	otx2_set_rss_key(pfvf);

	if (!netif_is_rxfh_configured(pfvf->netdev)) {
		/* Set RSS group 0 as default indirection table */
		rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size,
								  GFP_KERNEL);
		if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP])
			return -ENOMEM;

		rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP];
		for (idx = 0; idx < rss->rss_size; idx++)
			rss_ctx->ind_tbl[idx] =
				ethtool_rxfh_indir_default(idx,
							   pfvf->hw.rx_queues);
	}
	ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP);
	if (ret)
		return ret;

	/* Flowkey or hash config to be used for generating flow tag */
	rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
			   NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
			   NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
			   NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN |
			   NIX_FLOW_KEY_TYPE_IPV4_PROTO;

	ret = otx2_set_flowkey_cfg(pfvf);
	if (ret)
		return ret;

	rss->enable = true;
	return 0;
}

/* Setup UDP segmentation algorithm in HW */
static void otx2_setup_udp_segmentation(struct nix_lso_format_cfg *lso, bool v4)
{
	struct nix_lso_format *field;

	field = (struct nix_lso_format *)&lso->fields[0];
	lso->field_mask = GENMASK(18, 0);

	/* IP's Length field */
	field->layer = NIX_TXLAYER_OL3;
	/* In IPv4 the Length field is at offset 2 bytes; in IPv6 it is at 4 */
	field->offset = v4 ? 2 : 4;
	field->sizem1 = 1; /* i.e. 2 bytes */
	field->alg = NIX_LSOALG_ADD_PAYLEN;
	field++;

	/* No ID field in IPv6 header */
	if (v4) {
		/* Increment IPID */
		field->layer = NIX_TXLAYER_OL3;
		field->offset = 4;
		field->sizem1 = 1; /* i.e. 2 bytes */
		field->alg = NIX_LSOALG_ADD_SEGNUM;
		field++;
	}

	/* Update length in UDP header */
	field->layer = NIX_TXLAYER_OL4;
	field->offset = 4;
	field->sizem1 = 1;
	field->alg = NIX_LSOALG_ADD_PAYLEN;
}

/* Setup segmentation algorithms in HW and retrieve algorithm index */
void otx2_setup_segmentation(struct otx2_nic *pfvf)
{
	struct nix_lso_format_cfg_rsp *rsp;
	struct nix_lso_format_cfg *lso;
	struct otx2_hw *hw = &pfvf->hw;
	int err;

	mutex_lock(&pfvf->mbox.lock);

	/* UDPv4 segmentation */
	lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
	if (!lso)
		goto fail;

	/* Setup UDP/IP header fields that HW should update per segment */
	otx2_setup_udp_segmentation(lso, true);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	rsp = (struct nix_lso_format_cfg_rsp *)
	       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
	if (IS_ERR(rsp))
		goto fail;

	hw->lso_udpv4_idx = rsp->lso_format_idx;

	/* UDPv6 segmentation */
	lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
	if (!lso)
		goto fail;

	/* Setup UDP/IP header fields that HW should update per segment */
	otx2_setup_udp_segmentation(lso, false);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	rsp = (struct nix_lso_format_cfg_rsp *)
	       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
	if (IS_ERR(rsp))
		goto fail;

	hw->lso_udpv6_idx = rsp->lso_format_idx;
	mutex_unlock(&pfvf->mbox.lock);
	return;
fail:
	mutex_unlock(&pfvf->mbox.lock);
	netdev_info(pfvf->netdev,
		    "Failed to get LSO index for UDP GSO offload, disabling\n");
	pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4;
}

void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
{
	/* Configure CQE interrupt coalescing parameters.
	 *
	 * HW triggers an irq when ECOUNT > cq_ecount_wait, hence
	 * program one less than cq_ecount_wait. cq_time_wait is in
	 * usecs; convert it to a count of 100ns units.
	 */
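	/* Worked example with assumed values: cq_time_wait = 50 usecs
	 * becomes 50 * 10 = 500 in 100ns units (bits <63:48>), and
	 * cq_ecount_wait = 16 is programmed as 15 (low bits), so the
	 * irq fires once 16 CQEs are pending or the timer expires.
	 */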
	otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx),
		     ((u64)(pfvf->hw.cq_time_wait * 10) << 48) |
		     ((u64)pfvf->hw.cq_qcount_wait << 32) |
		     (pfvf->hw.cq_ecount_wait - 1));
}

int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		      dma_addr_t *dma)
{
	u8 *buf;

	buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
	if (unlikely(!buf))
		return -ENOMEM;

	*dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
				    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, *dma))) {
		page_frag_free(buf);
		return -ENOMEM;
	}

	return 0;
}

static int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
			   dma_addr_t *dma)
{
	int ret;

	local_bh_disable();
	ret = __otx2_alloc_rbuf(pfvf, pool, dma);
	local_bh_enable();
	return ret;
}

int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma)
{
	if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
		struct refill_work *work;
		struct delayed_work *dwork;

		work = &pfvf->refill_wrk[cq->cq_idx];
		dwork = &work->pool_refill_work;
		/* Schedule a task if no other task is running */
		if (!cq->refill_task_sched) {
			cq->refill_task_sched = true;
			schedule_delayed_work(dwork,
					      msecs_to_jiffies(100));
		}
		return -ENOMEM;
	}
	return 0;
}

void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	schedule_work(&pfvf->reset_task);
}
EXPORT_SYMBOL(otx2_tx_timeout);

void otx2_get_mac_from_af(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int err;

	err = otx2_hw_get_mac_addr(pfvf, netdev);
	if (err)
		dev_warn(pfvf->dev, "Failed to read mac from hardware\n");

	/* If AF doesn't provide a valid MAC, generate a random one */
	if (!is_valid_ether_addr(netdev->dev_addr))
		eth_hw_addr_random(netdev);
}
EXPORT_SYMBOL(otx2_get_mac_from_af);

int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
{
	struct otx2_hw *hw = &pfvf->hw;
	struct nix_txschq_config *req;
	u64 schq, parent;
	u64 dwrr_val;

	dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	req->lvl = lvl;
	req->num_regs = 1;

	schq = hw->txschq_list[lvl][0];
	/* Set topology and other configuration */
	if (lvl == NIX_TXSCH_LVL_SMQ) {
		req->reg[0] = NIX_AF_SMQX_CFG(schq);
		req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU;
		req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
				  (0x2ULL << 36);
		req->num_regs++;
		/* MDQ config */
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
		req->reg[1] = NIX_AF_MDQX_PARENT(schq);
		req->regval[1] = parent << 16;
		req->num_regs++;
		/* Set DWRR quantum */
		req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
		req->regval[2] = dwrr_val;
	} else if (lvl == NIX_TXSCH_LVL_TL4) {
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
		req->reg[0] = NIX_AF_TL4X_PARENT(schq);
		req->regval[0] = parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
		req->regval[1] = dwrr_val;
	} else if (lvl == NIX_TXSCH_LVL_TL3) {
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
		req->reg[0] = NIX_AF_TL3X_PARENT(schq);
		req->regval[0] = parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
		req->regval[1] = dwrr_val;
		if (lvl == hw->txschq_link_cfg_lvl) {
			req->num_regs++;
			req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
			/* Enable this queue and backpressure */
			req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
		}
	} else if (lvl == NIX_TXSCH_LVL_TL2) {
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
		req->reg[0] = NIX_AF_TL2X_PARENT(schq);
		req->regval[0] = parent << 16;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
		req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;

		if (lvl == hw->txschq_link_cfg_lvl) {
			req->num_regs++;
			req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
			/* Enable this queue and backpressure */
			req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
		}
	} else if (lvl == NIX_TXSCH_LVL_TL1) {
		/* Default config for TL1.
		 * For a VF this is always ignored.
		 */

		/* On CN10K, if RR_WEIGHT is greater than 16384, HW will
		 * clip it to 16384, so configuring a 24-bit max value
		 * will work on both OTx2 and CN10K.
		 */
		req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
		req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
		req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);

		req->num_regs++;
		req->reg[2] = NIX_AF_TL1X_CIR(schq);
		req->regval[2] = 0;
	}

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
	struct nix_txsch_alloc_req *req;
	int lvl;

	/* Get memory to put this msg */
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	/* Request one schq per level */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		req->schq[lvl] = 1;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_txschq_stop(struct otx2_nic *pfvf)
{
	struct nix_txsch_free_req *free_req;
	int lvl, schq, err;

	mutex_lock(&pfvf->mbox.lock);
	/* Free the transmit schedulers */
	free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
	if (!free_req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	free_req->flags = TXSCHQ_FREE_ALL;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	/* Clear the txschq list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
			pfvf->hw.txschq_list[lvl][schq] = 0;
	}
	return err;
}

void otx2_sqb_flush(struct otx2_nic *pfvf)
{
	int qidx, sqe_tail, sqe_head;
	u64 incr, *ptr, val;
	int timeout = 1000;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
	for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
		incr = (u64)qidx << 32;
		while (timeout) {
			val = otx2_atomic64_add(incr, ptr);
			sqe_head = (val >> 20) & 0x3F;
			sqe_tail = (val >> 28) & 0x3F;
			if (sqe_head == sqe_tail)
				break;
			usleep_range(1, 3);
			timeout--;
		}
	}
}

/* RED and drop levels of CQ on packet reception.
 * For a CQ, level is a measure of emptiness (0x0 = full, 255 = empty).
 */
#define RQ_PASS_LVL_CQ(skid, qsize)	((((skid) + 16) * 256) / (qsize))
#define RQ_DROP_LVL_CQ(skid, qsize)	(((skid) * 256) / (qsize))

/* RED and drop levels of AURA for packet reception.
 * For an AURA, level is a measure of fullness (0x0 = empty, 255 = full).
 * E.g. for an RQ of length 1K with pass/drop levels of 204/230,
 * RED accepts packets while free pointers are > 102 and <= 205,
 * and drops packets once free pointers fall below 102.
 */
#define RQ_BP_LVL_AURA   (255 - ((85 * 256) / 100)) /* BP when 85% is full */
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
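
/* Worked example with assumed values: rq_skid = 600 and a 1024-entry CQ
 * give RQ_PASS_LVL_CQ = ((600 + 16) * 256) / 1024 = 154 and
 * RQ_DROP_LVL_CQ = (600 * 256) / 1024 = 150 on the 0-255 emptiness
 * scale above, i.e. RED kicks in once the CQ is a little over half full.
 */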

static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct nix_aq_enq_req *aq;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->rq.cq = qidx;
	aq->rq.ena = 1;
	aq->rq.pb_caching = 1;
	aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
	aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
	aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
	aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
	aq->rq.qint_idx = 0;
	aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
	aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
	aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
	aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
{
	struct otx2_nic *pfvf = dev;
	struct otx2_snd_queue *sq;
	struct nix_aq_enq_req *aq;

	sq = &pfvf->qset.sq[qidx];
	sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->sq.cq = pfvf->hw.rx_queues + qidx;
	aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
	aq->sq.cq_ena = 1;
	aq->sq.ena = 1;
	/* Only one SMQ is allocated, map all SQs to that SMQ */
	aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
	aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
	aq->sq.default_chan = pfvf->hw.tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
	aq->sq.sqb_aura = sqb_aura;
	aq->sq.sq_int_ena = NIX_SQINT_BITS;
	aq->sq.qint_idx = 0;
	/* Due to pipelining, a minimum of 2000 unused CQEs must be
	 * maintained to avoid CQ overflow.
	 */
	aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));
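	/* Worked example with assumed values: SEND_CQ_SKID = 2000 (the
	 * 2000-CQE skid above) and sqe_cnt = 4096 give
	 * cq_limit = (2000 * 256) / 4096 = 125, i.e. the limit is
	 * expressed in 256ths of the queue size, matching the 0-255
	 * level scale used elsewhere in this file.
	 */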

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_snd_queue *sq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[sqb_aura];
	sq = &qset->sq[qidx];
	sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128;
	sq->sqe_cnt = qset->sqe_cnt;

	err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
	if (err)
		return err;

	if (qidx < pfvf->hw.tx_queues) {
		err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
				 TSO_HEADER_SIZE);
		if (err)
			return err;
	}

	sq->sqe_base = sq->sqe->base;
	sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
	if (!sq->sg)
		return -ENOMEM;

	if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
		err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
				 sizeof(*sq->timestamps));
		if (err)
			return err;
	}

	sq->head = 0;
	sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
	sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
	/* Set SQE threshold to 10% of total SQEs */
	sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
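	/* Worked example with assumed values: a 4KB SQB and 128B SQE give
	 * sqe_per_sqb = (4096 / 128) - 1 = 31 (one SQE per SQB is reserved
	 * for chaining, per the comment in otx2_sq_aura_pool_init()); for
	 * sqe_cnt = 1024, num_sqbs = (1024 + 31) / 31 = 34 and
	 * sqe_thresh = (34 * 31 * 10) / 100 = 105.
	 */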
	sq->aura_id = sqb_aura;
	sq->aura_fc_addr = pool->fc_addr->base;
	sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));

	sq->stats.bytes = 0;
	sq->stats.pkts = 0;

	return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
}

static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	int err, pool_id, non_xdp_queues;
	struct nix_aq_enq_req *aq;
	struct otx2_cq_queue *cq;

	cq = &qset->cq[qidx];
	cq->cq_idx = qidx;
	non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues;
	if (qidx < pfvf->hw.rx_queues) {
		cq->cq_type = CQ_RX;
		cq->cint_idx = qidx;
		cq->cqe_cnt = qset->rqe_cnt;
		if (pfvf->xdp_prog)
			xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
	} else if (qidx < non_xdp_queues) {
		cq->cq_type = CQ_TX;
		cq->cint_idx = qidx - pfvf->hw.rx_queues;
		cq->cqe_cnt = qset->sqe_cnt;
	} else {
		cq->cq_type = CQ_XDP;
		cq->cint_idx = qidx - non_xdp_queues;
		cq->cqe_cnt = qset->sqe_cnt;
	}
	cq->cqe_size = pfvf->qset.xqe_size;

	/* Allocate memory for CQEs */
	err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size);
	if (err)
		return err;

	/* Save CQE CPU base for faster reference */
	cq->cqe_base = cq->cqe->base;
	/* If all RQ auras point to a single pool, then all CQs' receive
	 * buffer pools also point to that same pool.
	 */
	pool_id = ((cq->cq_type == CQ_RX) &&
		   (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
	cq->rbpool = &qset->pool[pool_id];
	cq->refill_task_sched = false;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->cq.ena = 1;
	aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4);
	aq->cq.caching = 1;
	aq->cq.base = cq->cqe->iova;
	aq->cq.cint_idx = cq->cint_idx;
	aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS;
	aq->cq.qint_idx = 0;
	aq->cq.avg_level = 255;

	if (qidx < pfvf->hw.rx_queues) {
		aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
		aq->cq.drop_ena = 1;

		if (!is_otx2_lbkvf(pfvf->pdev)) {
			/* Enable receive CQ backpressure */
			aq->cq.bp_ena = 1;
#ifdef CONFIG_DCB
			aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]];
#else
			aq->cq.bpid = pfvf->bpid[0];
#endif

			/* Set backpressure level to the same as the CQ
			 * pass level.
			 */
			aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
		}
	}

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

static void otx2_pool_refill_task(struct work_struct *work)
{
	struct otx2_cq_queue *cq;
	struct otx2_pool *rbpool;
	struct refill_work *wrk;
	int qidx, free_ptrs = 0;
	struct otx2_nic *pfvf;
	dma_addr_t bufptr;

	wrk = container_of(work, struct refill_work, pool_refill_work.work);
	pfvf = wrk->pf;
	qidx = wrk - pfvf->refill_wrk;
	cq = &pfvf->qset.cq[qidx];
	rbpool = cq->rbpool;
	free_ptrs = cq->pool_ptrs;

	while (cq->pool_ptrs) {
		if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
			/* Reschedule the work queue if we failed to free at
			 * least half of the pointers; otherwise let NAPI
			 * refill this RQ.
			 */
			if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
				struct delayed_work *dwork;

				dwork = &wrk->pool_refill_work;
				schedule_delayed_work(dwork,
						      msecs_to_jiffies(100));
			} else {
				cq->refill_task_sched = false;
			}
			return;
		}
		pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
		cq->pool_ptrs--;
	}
	cq->refill_task_sched = false;
}

int otx2_config_nix_queues(struct otx2_nic *pfvf)
{
	int qidx, err;

	/* Initialize RX queues */
	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);

		err = otx2_rq_init(pfvf, qidx, lpb_aura);
		if (err)
			return err;
	}

	/* Initialize TX queues */
	for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
		u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);

		err = otx2_sq_init(pfvf, qidx, sqb_aura);
		if (err)
			return err;
	}

	/* Initialize completion queues */
	for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
		err = otx2_cq_init(pfvf, qidx);
		if (err)
			return err;
	}

	pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf,
							   NIX_LF_CQ_OP_STATUS);

	/* Initialize work queue for receive buffer refill */
	pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
					sizeof(struct refill_work), GFP_KERNEL);
	if (!pfvf->refill_wrk)
		return -ENOMEM;

	for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
		pfvf->refill_wrk[qidx].pf = pfvf;
		INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work,
				  otx2_pool_refill_task);
	}
	return 0;
}

int otx2_config_nix(struct otx2_nic *pfvf)
{
	struct nix_lf_alloc_req *nixlf;
	struct nix_lf_alloc_rsp *rsp;
	int err;

	pfvf->qset.xqe_size = pfvf->hw.xqe_size;

	/* Get memory to put this msg */
	nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
	if (!nixlf)
		return -ENOMEM;

	/* Set RQ/SQ/CQ counts */
	nixlf->rq_cnt = pfvf->hw.rx_queues;
	nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
	nixlf->cq_cnt = pfvf->qset.cq_cnt;
	nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
	nixlf->rss_grps = MAX_RSS_GROUPS;
	nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64;
	/* We don't know the absolute NPA LF idx attached.
	 * AF will replace 'RVU_DEFAULT_PF_FUNC' with the
	 * NPA LF attached to this RVU PF/VF.
	 */
	nixlf->npa_func = RVU_DEFAULT_PF_FUNC;
	/* Disable alignment pad, enable L2 length check,
	 * enable L4 TCP/UDP checksum verification.
	 */
	nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		return err;

	rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
							   &nixlf->hdr);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	if (rsp->qints < 1)
		return -ENXIO;

	return rsp->hdr.rc;
}

void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	int sqb, qidx;
	u64 iova, pa;

	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
		sq = &qset->sq[qidx];
		if (!sq->sqb_ptrs)
			continue;
		for (sqb = 0; sqb < sq->sqb_count; sqb++) {
			if (!sq->sqb_ptrs[sqb])
				continue;
			iova = sq->sqb_ptrs[sqb];
			pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
			dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			put_page(virt_to_page(phys_to_virt(pa)));
		}
		sq->sqb_count = 0;
	}
}

void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
{
	int pool_id, pool_start = 0, pool_end = 0, size = 0;
	u64 iova, pa;

	if (type == AURA_NIX_SQ) {
		pool_start = otx2_get_pool_idx(pfvf, type, 0);
		pool_end = pool_start + pfvf->hw.sqpool_cnt;
		size = pfvf->hw.sqb_size;
	}
	if (type == AURA_NIX_RQ) {
		pool_start = otx2_get_pool_idx(pfvf, type, 0);
		pool_end = pfvf->hw.rqpool_cnt;
		size = pfvf->rbsize;
	}

	/* Free SQB and RQB pointers from the aura pool */
	for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
		iova = otx2_aura_allocptr(pfvf, pool_id);
		while (iova) {
			if (type == AURA_NIX_RQ)
				iova -= OTX2_HEAD_ROOM;

			pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
			dma_unmap_page_attrs(pfvf->dev, iova, size,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			put_page(virt_to_page(phys_to_virt(pa)));
			iova = otx2_aura_allocptr(pfvf, pool_id);
		}
	}
}

void otx2_aura_pool_free(struct otx2_nic *pfvf)
{
	struct otx2_pool *pool;
	int pool_id;

	if (!pfvf->qset.pool)
		return;

	for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) {
		pool = &pfvf->qset.pool[pool_id];
		qmem_free(pfvf->dev, pool->stack);
		qmem_free(pfvf->dev, pool->fc_addr);
	}
	devm_kfree(pfvf->dev, pfvf->qset.pool);
	pfvf->qset.pool = NULL;
}

static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
			  int pool_id, int numptrs)
{
	struct npa_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[pool_id];

	/* Allocate memory for HW to update Aura count.
	 * Alloc one cache line, so that it fits all FC_STYPE modes.
	 */
	if (!pool->fc_addr) {
		err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
		if (err)
			return err;
	}

	/* Initialize this aura's context via AF */
	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			return err;
		aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
		if (!aq)
			return -ENOMEM;
	}

	aq->aura_id = aura_id;
	/* Will be filled by AF with correct pool context address */
	aq->aura.pool_addr = pool_id;
	aq->aura.pool_caching = 1;
	aq->aura.shift = ilog2(numptrs) - 8;
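	/* The shift above presumably scales the aura's pointer count onto
	 * the 8-bit (0-255) level scale used by the RED/BP thresholds
	 * below, so that 'numptrs' maps to full; this reading is an
	 * assumption inferred from the level macros earlier in this file.
	 */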
	aq->aura.count = numptrs;
	aq->aura.limit = numptrs;
	aq->aura.avg_level = 255;
	aq->aura.ena = 1;
	aq->aura.fc_ena = 1;
	aq->aura.fc_addr = pool->fc_addr->iova;
	aq->aura.fc_hyst_bits = 0; /* Store count on all updates */

	/* Enable backpressure for RQ aura */
	if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
		aq->aura.bp_ena = 0;
		/* If NIX1 LF is attached then specify NIX1_RX.
		 *
		 * Below, NPA_AURA_S[BP_ENA] is set according to the
		 * NPA_BPINTF_E enumeration, given as:
		 * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX, so
		 * NIX0_RX is 0x0 + 0*0x1 = 0
		 * NIX1_RX is 0x0 + 1*0x1 = 1
		 * But in the HRM it is given that
		 * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
		 * NIX-RX based on [BP] level. One bit per NIX-RX; index
		 * enumerated by NPA_BPINTF_E."
		 */
		if (pfvf->nix_blkaddr == BLKADDR_NIX1)
			aq->aura.bp_ena = 1;
#ifdef CONFIG_DCB
		aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
#else
		aq->aura.nix0_bpid = pfvf->bpid[0];
#endif

		/* Set backpressure level for RQ's Aura */
		aq->aura.bp = RQ_BP_LVL_AURA;
	}

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_AURA;
	aq->op = NPA_AQ_INSTOP_INIT;

	return 0;
}

static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
			  int stack_pages, int numptrs, int buf_size)
{
	struct npa_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[pool_id];
	/* Alloc memory for stack which is used to store buffer pointers */
	err = qmem_alloc(pfvf->dev, &pool->stack,
			 stack_pages, pfvf->hw.stack_pg_bytes);
	if (err)
		return err;

	pool->rbsize = buf_size;

	/* Initialize this pool's context via AF */
	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err) {
			qmem_free(pfvf->dev, pool->stack);
			return err;
		}
		aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
		if (!aq) {
			qmem_free(pfvf->dev, pool->stack);
			return -ENOMEM;
		}
	}

	aq->aura_id = pool_id;
	aq->pool.stack_base = pool->stack->iova;
	aq->pool.stack_caching = 1;
	aq->pool.ena = 1;
	aq->pool.buf_size = buf_size / 128;
	aq->pool.stack_max_pages = stack_pages;
	aq->pool.shift = ilog2(numptrs) - 8;
	aq->pool.ptr_start = 0;
	aq->pool.ptr_end = ~0ULL;

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_POOL;
	aq->op = NPA_AQ_INSTOP_INIT;

	return 0;
}

int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
{
	int qidx, pool_id, stack_pages, num_sqbs;
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	struct otx2_pool *pool;
	dma_addr_t bufptr;
	int err, ptr;

	/* Calculate the number of SQBs needed.
	 *
	 * For a 128-byte SQE and a 4KB SQB, 31 SQEs will fit in one SQB.
	 * The last SQE is used for pointing to the next SQB.
	 */
	num_sqbs = (hw->sqb_size / 128) - 1;
	num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;
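	/* Worked example with assumed values: sqb_size = 4096 gives 31
	 * usable SQEs per SQB, so sqe_cnt = 1024 yields
	 * (1024 + 31) / 31 = 34 SQBs; the division rounds up, allocating
	 * one extra SQB when sqe_cnt divides evenly.
	 */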

	/* Get the number of stack pages needed */
	stack_pages =
		(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
		/* Initialize aura context */
		err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
		if (err)
			goto fail;

		/* Initialize pool context */
		err = otx2_pool_init(pfvf, pool_id, stack_pages,
				     num_sqbs, hw->sqb_size);
		if (err)
			goto fail;
	}

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	/* Allocate pointers and free them to aura/pool */
	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
		pool = &pfvf->qset.pool[pool_id];

		sq = &qset->sq[qidx];
		sq->sqb_count = 0;
		sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
		if (!sq->sqb_ptrs)
			return -ENOMEM;

		for (ptr = 0; ptr < num_sqbs; ptr++) {
			if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
				return -ENOMEM;
			pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
		}
	}

	return 0;
fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	otx2_aura_pool_free(pfvf);
	return err;
}

int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int stack_pages, pool_id, rq;
	struct otx2_pool *pool;
	int err, ptr, num_ptrs;
	dma_addr_t bufptr;

	num_ptrs = pfvf->qset.rqe_cnt;

	stack_pages =
		(num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

	for (rq = 0; rq < hw->rx_queues; rq++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq);
		/* Initialize aura context */
		err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs);
		if (err)
			goto fail;
	}
	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
		err = otx2_pool_init(pfvf, pool_id, stack_pages,
				     num_ptrs, pfvf->rbsize);
		if (err)
			goto fail;
	}

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	/* Allocate pointers and free them to aura/pool */
	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
		pool = &pfvf->qset.pool[pool_id];
		for (ptr = 0; ptr < num_ptrs; ptr++) {
			if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
				return -ENOMEM;
			pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
						   bufptr + OTX2_HEAD_ROOM);
		}
	}

	return 0;
fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	otx2_aura_pool_free(pfvf);
	return err;
}

int otx2_config_npa(struct otx2_nic *pfvf)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct npa_lf_alloc_req *npalf;
	struct otx2_hw *hw = &pfvf->hw;
	int aura_cnt;

	/* Pool - Stack of free buffer pointers
	 * Aura - Alloc/frees pointers from/to pool for NIX DMA.
	 */

	if (!hw->pool_cnt)
		return -EINVAL;

	qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt,
				  sizeof(struct otx2_pool), GFP_KERNEL);
	if (!qset->pool)
		return -ENOMEM;

	/* Get memory to put this msg */
	npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox);
	if (!npalf)
		return -ENOMEM;

	/* Set aura and pool counts */
	npalf->nr_pools = hw->pool_cnt;
	aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt));
	npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1;
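	/* aura_sz appears to encode the aura count as 2^(6 + aura_sz)
	 * with a 128-aura minimum: e.g. pool_cnt = 20 rounds up to 32,
	 * aura_cnt = 5 < ilog2(128) = 7, so aura_sz = 1 (128 auras);
	 * pool_cnt = 256 gives aura_cnt = 8 and aura_sz = 2 (256 auras).
	 * This encoding is an assumption inferred from the formula above.
	 */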

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_detach_resources(struct mbox *mbox)
{
	struct rsrc_detach *detach;

	mutex_lock(&mbox->lock);
	detach = otx2_mbox_alloc_msg_detach_resources(mbox);
	if (!detach) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	/* detach all */
	detach->partial = false;

	/* Send detach request to AF */
	otx2_mbox_msg_send(&mbox->mbox, 0);
	mutex_unlock(&mbox->lock);
	return 0;
}
EXPORT_SYMBOL(otx2_detach_resources);

int otx2_attach_npa_nix(struct otx2_nic *pfvf)
{
	struct rsrc_attach *attach;
	struct msg_req *msix;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	/* Get memory to put this msg */
	attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
	if (!attach) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	attach->npalf = true;
	attach->nixlf = true;

	/* Send attach request to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->nix_blkaddr = BLKADDR_NIX0;

	/* If the platform has two NIX blocks then the LF may have been
	 * allocated from NIX1.
	 */
	if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL)
		pfvf->nix_blkaddr = BLKADDR_NIX1;

	/* Get NPA and NIX MSIX vector offsets */
	msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
	if (!msix) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}
	mutex_unlock(&pfvf->mbox.lock);

	if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
	    pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
		dev_err(pfvf->dev,
			"RVUPF: Invalid MSIX vector offset for NPA/NIX\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_attach_npa_nix);

void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
{
	struct hwctx_disable_req *req;

	mutex_lock(&mbox->lock);
	/* Request AQ to disable this context */
	if (npa)
		req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
	else
		req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);

	if (!req) {
		mutex_unlock(&mbox->lock);
		return;
	}

	req->ctype = type;

	if (otx2_sync_mbox_msg(mbox))
		dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
			__func__);

	mutex_unlock(&mbox->lock);
}

int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
{
	struct nix_bp_cfg_req *req;

	if (enable)
		req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox);
	else
		req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox);

	if (!req)
		return -ENOMEM;

	req->chan_base = 0;
#ifdef CONFIG_DCB
	req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
	req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
#else
	req->chan_cnt = 1;
	req->bpid_per_chan = 0;
#endif

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
EXPORT_SYMBOL(otx2_nix_config_bp);

/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp)
{
	int id;

	for (id = 0; id < CGX_RX_STATS_COUNT; id++)
		pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id];
	for (id = 0; id < CGX_TX_STATS_COUNT; id++)
		pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
}

void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp)
{
	pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks;
	pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks;
}

void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp)
{
	int lvl, schq;

	/* Setup transmit scheduler list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		for (schq = 0; schq < rsp->schq[lvl]; schq++)
			pf->hw.txschq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];

	pf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
}
EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);

void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp)
{
	pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
	pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
}
EXPORT_SYMBOL(mbox_handler_npa_lf_alloc);

void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp)
{
	pfvf->hw.sqb_size = rsp->sqb_size;
	pfvf->hw.rx_chan_base = rsp->rx_chan_base;
	pfvf->hw.tx_chan_base = rsp->tx_chan_base;
	pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
	pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
	pfvf->hw.cgx_links = rsp->cgx_links;
	pfvf->hw.lbk_links = rsp->lbk_links;
	pfvf->hw.tx_link = rsp->tx_link;
}
EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);

void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp)
{
	pfvf->hw.npa_msixoff = rsp->npa_msixoff;
	pfvf->hw.nix_msixoff = rsp->nix_msixoff;
}
EXPORT_SYMBOL(mbox_handler_msix_offset);

void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp)
{
	int chan, chan_id;

	for (chan = 0; chan < rsp->chan_cnt; chan++) {
		chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F);
		pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF;
	}
}
EXPORT_SYMBOL(mbox_handler_nix_bp_enable);

void otx2_free_cints(struct otx2_nic *pfvf, int n)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	int irq, qidx;

	for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
	     qidx < n;
	     qidx++, irq++) {
		int vector = pci_irq_vector(pfvf->pdev, irq);

		irq_set_affinity_hint(vector, NULL);
		free_cpumask_var(hw->affinity_mask[irq]);
		free_irq(vector, &qset->napi[qidx]);
	}
}

void otx2_set_cints_affinity(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int vec, cpu, irq, cint;

	vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
	cpu = cpumask_first(cpu_online_mask);

	/* CQ interrupts */
	for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) {
		if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL))
			return;

		cpumask_set_cpu(cpu, hw->affinity_mask[vec]);

		irq = pci_irq_vector(pfvf->pdev, vec);
		irq_set_affinity_hint(irq, hw->affinity_mask[vec]);

		cpu = cpumask_next(cpu, cpu_online_mask);
		if (unlikely(cpu >= nr_cpu_ids))
			cpu = 0;
	}
}

u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
{
	struct nix_hw_info *rsp;
	struct msg_req *req;
	u16 max_mtu;
	int rc;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_nix_get_hw_info(&pfvf->mbox);
	if (!req) {
		rc = -ENOMEM;
		goto out;
	}

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (!rc) {
		rsp = (struct nix_hw_info *)
		       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);

		/* HW counts VLAN insertion bytes (8 for a double tag)
		 * irrespective of whether the SQE requests VLAN insertion
		 * in the packet or not. Hence these 8 bytes have to be
		 * discounted from the max packet size, otherwise HW will
		 * throw SMQ errors.
		 */
		max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN;

		/* Also save DWRR MTU, needed for DWRR weight calculation */
		pfvf->hw.dwrr_mtu = rsp->rpm_dwrr_mtu;
		if (!pfvf->hw.dwrr_mtu)
			pfvf->hw.dwrr_mtu = 1;
	}

out:
	mutex_unlock(&pfvf->mbox.lock);
	if (rc) {
		dev_warn(pfvf->dev,
			 "Failed to get MTU from hardware, setting default value (1500)\n");
		max_mtu = 1500;
	}
	return max_mtu;
}
EXPORT_SYMBOL(otx2_get_max_mtu);

int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;
	struct otx2_nic *pfvf = netdev_priv(netdev);
	bool ntuple = !!(features & NETIF_F_NTUPLE);
	bool tc = !!(features & NETIF_F_HW_TC);

	if ((changed & NETIF_F_NTUPLE) && !ntuple)
		otx2_destroy_ntuple_flows(pfvf);

	if ((changed & NETIF_F_NTUPLE) && ntuple) {
		if (!pfvf->flow_cfg->max_flows) {
			netdev_err(netdev,
				   "Can't enable NTUPLE, MCAM entries not allocated\n");
			return -EINVAL;
		}
	}

	if ((changed & NETIF_F_HW_TC) && tc) {
		if (!pfvf->flow_cfg->max_flows) {
			netdev_err(netdev,
				   "Can't enable TC, MCAM entries not allocated\n");
			return -EINVAL;
		}
	}

	if ((changed & NETIF_F_HW_TC) && !tc &&
	    pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) {
		netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
		return -EBUSY;
	}

	if ((changed & NETIF_F_NTUPLE) && ntuple &&
	    (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
		netdev_err(netdev,
			   "Can't enable NTUPLE when TC is active, disable TC and retry\n");
		return -EINVAL;
	}

	if ((changed & NETIF_F_HW_TC) && tc &&
	    (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
		netdev_err(netdev,
			   "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int __weak								\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp)		\
{									\
	/* Nothing to do here */					\
	return 0;							\
}									\
EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
#undef M