// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_npc_hash.h"

static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
			       int type, bool add);
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr);
static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc);
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof);
static const char *nix_get_ctx_name(int ctype);

enum mc_tbl_sz {
	MC_TBL_SZ_256,
	MC_TBL_SZ_512,
	MC_TBL_SZ_1K,
	MC_TBL_SZ_2K,
	MC_TBL_SZ_4K,
	MC_TBL_SZ_8K,
	MC_TBL_SZ_16K,
	MC_TBL_SZ_32K,
	MC_TBL_SZ_64K,
};

enum mc_buf_cnt {
	MC_BUF_CNT_8,
	MC_BUF_CNT_16,
	MC_BUF_CNT_32,
	MC_BUF_CNT_64,
	MC_BUF_CNT_128,
	MC_BUF_CNT_256,
	MC_BUF_CNT_512,
	MC_BUF_CNT_1024,
	MC_BUF_CNT_2048,
};

enum nix_makr_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};

/* For now considering MC resources needed for broadcast
 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_512
#define MC_BUF_CNT	MC_BUF_CNT_128

struct mce {
	struct hlist_node	node;
	u16			pcifunc;
};
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
	int i = 0;

	/* If blkaddr is 0, return the first NIX block address */
	if (blkaddr == 0)
		return rvu->nix_blkaddr[blkaddr];

	while (i + 1 < MAX_NIX_BLKS) {
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];
		i++;
	}

	return 0;
}

bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}

int rvu_get_nixlf_count(struct rvu *rvu)
{
	int blkaddr = 0, max = 0;
	struct rvu_block *block;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
	return max;
}

int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}

int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
			struct nix_hw **nix_hw, int *blkaddr)
{
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || *blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
	if (!*nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;
	return 0;
}

static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}

static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
	int idx;

	if (!mcast)
		return 0;

	idx = mcast->next_free_mce;
	mcast->next_free_mce += count;
	return idx;
}

struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	int nix_blkaddr = 0, i = 0;
	struct rvu *rvu = hw->rvu;

	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
	while (nix_blkaddr) {
		if (blkaddr == nix_blkaddr && hw->nix)
			return &hw->nix[i];
		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
		i++;
	}
	return NULL;
}

u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
{
	dwrr_mtu &= 0x1FULL;

	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	switch (dwrr_mtu) {
	case 4:
		return 9728;
	case 5:
		return 10240;
	default:
		return BIT_ULL(dwrr_mtu);
	}
}

u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	if (bytes > BIT_ULL(16))
		return 0;

	switch (bytes) {
	case 9728:
		return 4;
	case 10240:
		return 5;
	default:
		return ilog2(bytes);
	}
}
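
/* Worked examples of the 5-bit DWRR MTU encoding above:
 *   dwrr_mtu 10 -> 2^10 = 1024 bytes, dwrr_mtu 16 -> 65536 bytes,
 *   dwrr_mtu 4  -> 9728 bytes (reserved encoding).
 * In the reverse direction 9728 -> 4 and 1024 -> ilog2(1024) = 10.
 * For a non-power-of-2 byte count ilog2() rounds down, so e.g.
 * 1500 bytes encodes as 10 (i.e. 1024 bytes).
 */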

static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");

	/* SW_SYNC ensures all existing transactions are finished and pkts
	 * are written to LLC/DRAM, queues should be torn down after
	 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
	 * an existing transaction might end after SW_SYNC operation. To
	 * ensure operation is fully done, do the SW_SYNC twice.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}

static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}

static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
			      struct nix_lf_alloc_rsp *rsp, bool loop)
{
	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u16 req_chan_base, req_chan_end, req_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct sdp_node_info *sdp_info;
	int pkind, pf, vf, lbkid, vfid;
	u8 cgx_id, lmac_id;
	bool from_vf;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;

		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);

		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternatively for lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* By default NIX0 is configured to send packet on lbk link 1
		 * (which corresponds to LBK1), same packet will receive on
		 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
		 * (which corresponds to LBK2) packet will receive on NIX0 lbk
		 * link 1.
		 * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0
		 * transmits and receives on lbk link 0, which corresponds
		 * to LBK1 block, back to back connectivity between NIX and
		 * LBK can be achieved (which is similar to 96xx)
		 *
		 *			RX		TX
		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
		 */
		if (loop)
			lbkid = !lbkid;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if an odd number of AF VFs are
		 * enabled then the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = hw->cgx_links + lbkid;
		pfvf->lbkid = lbkid;
		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);

		break;
	case NIX_INTF_TYPE_SDP:
		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
		sdp_info = parent_pf->sdp_info;
		if (!sdp_info) {
			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
			return -EINVAL;
		}
		if (from_vf) {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
				sdp_info->num_pf_rings;
			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
			for (vfid = 0; vfid < vf; vfid++)
				req_chan_base += sdp_info->vf_rings[vfid];
			req_chan_cnt = sdp_info->vf_rings[vf];
			req_chan_end = req_chan_base + req_chan_cnt - 1;
			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
				dev_err(rvu->dev,
					"PF_Func 0x%x: Invalid channel base and count\n",
					pcifunc);
				return -EINVAL;
			}
		} else {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
			req_chan_cnt = sdp_info->num_pf_rings;
		}

		pfvf->rx_chan_base = req_chan_base;
		pfvf->rx_chan_cnt = req_chan_cnt;
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;

		rsp->tx_link = hw->cgx_links + hw->lbk_links;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}
	/* Install MCAM rule matching Ethernet broadcast mac address */
	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);

	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}

static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	/* Disable DMAC filters used */
	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}

int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}

static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
	u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	lmac_chan_cnt = cfg & 0xFF;

	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	sdp_chan_cnt = cfg & 0xFFF;
	sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * Lmac channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
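	/* Example with the layout above (16 channels per LMAC, 4 LMACs
	 * per CGX): cgx_id = 1, lmac_id = 0, chan_base = 0 gives
	 * bpid = (1 * 4 * 16) + (0 * 16) + 0 = 64, the start of the
	 * cgx(1)_lmac(0) range shown above.
	 */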
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > 16)
			return -EINVAL;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
			(lmac_id * lmac_chan_cnt) + req->chan_base;

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > cgx_bpid_cnt)
			return -EINVAL;
		break;

	case NIX_INTF_TYPE_LBK:
		if ((req->chan_base + req->chan_cnt) > 63)
			return -EINVAL;
		bpid = cgx_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
			return -EINVAL;
		break;
	case NIX_INTF_TYPE_SDP:
		if ((req->chan_base + req->chan_cnt) > 255)
			return -EINVAL;

		bpid = sdp_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;

		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}

int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	s16 bpid, bpid_base;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		type = NIX_INTF_TYPE_SDP;

	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		if (bpid < 0) {
			dev_warn(rvu->dev, "Fail to enable backpressure\n");
			return -EINVAL;
		}

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		cfg &= ~GENMASK_ULL(8, 0);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

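	/* Response encoding: each chan_bpid[] entry carries the channel
	 * offset in bits [16:10] and the assigned bpid in bits [9:0],
	 * matching the masks used in the loop below.
	 */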
	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel to the bpid assigned to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
					(bpid_base & 0x3FF);
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}

static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e. 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 1; /* 2 bytes */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}
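
/* Each nix_lso_format entry above is a 64-bit field descriptor written
 * verbatim into NIX_AF_LSO_FORMATX_FIELDX(). The 'alg' opcode tells HW
 * how to patch the named header field in every segment: ADD_PAYLEN adds
 * the segment's payload length (IP length), ADD_SEGNUM adds the segment
 * number (IPv4 ID), ADD_OFFSET adds the payload byte offset (TCP seq)
 * and TCP_FLAGS applies the first/middle segment flag masks set up in
 * nix_setup_lso() below.
 */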

static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Get max HW supported format indices */
	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
	nix_hw->lso.total = cfg;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Setup default static LSO formats
	 *
	 * Configure format fields for TCPv4 segmentation offload
	 */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;
}

static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}

static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask, bool tag_lsb_as_adder)
{
	int err, grp, num_indices;
	u64 val;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
			ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);

	if (tag_lsb_as_adder)
		val |= BIT_ULL(5);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}
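
/* Example of the per-group programming above: with rss_sz = 256 and
 * rss_grps = 4, group 'grp' starts at indirection-table offset
 * 256 * grp and its size is encoded as ilog2(256) - 1 = 7, so the
 * four groups tile the 1024-entry table back to back.
 */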

static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD)
		/* TODO: Replace this with some error code */
		return -EBUSY;

	return 0;
}

static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
				   struct nix_aq_enq_req *req,
				   struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = nix_hw->blkaddr;
	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
	 * operations done by AF itself.
	 */
	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}

	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);

		/* Check if index exceeds MCE list length */
		if (!nix_hw->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_BANDPROF:
		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
					nix_hw, pcifunc))
			rc = NIX_AF_ERR_INVALID_BANDPROF;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, req->sq.smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(mask, &req->prof_mask,
			       sizeof(struct nix_bandprof_s));
		fallthrough;
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

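	/* For WRITE ops, mirror the resulting 'ena' state into the SW
	 * bitmaps: new_ena = (written_val & mask) | (old_bit & ~mask),
	 * i.e. take the written value where the mask selects the field
	 * and keep the current bitmap state otherwise.
	 */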
	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
				(test_bit(req->qidx, pfvf->rq_bmap) &
				~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->sq.ena & req->sq_mask.ena) |
				(test_bit(req->qidx, pfvf->sq_bmap) &
				~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->cq.ena & req->cq_mask.ena) |
				(test_bit(req->qidx, pfvf->cq_bmap) &
				~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
			else if (req->ctype == NIX_AQ_CTYPE_MCE)
				memcpy(&rsp->mce, ctx,
				       sizeof(struct nix_rx_mce_s));
			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
				memcpy(&rsp->prof, ctx,
				       sizeof(struct nix_bandprof_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}

static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
				 struct nix_aq_enq_req *req, u8 ctype)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	int rc, word;

	if (req->ctype != NIX_AQ_CTYPE_CQ)
		return 0;

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
				 req->hdr.pcifunc, ctype, req->qidx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
			__func__, nix_get_ctx_name(ctype), req->qidx,
			req->hdr.pcifunc);
		return rc;
	}

	/* Make copy of original context & mask which are required
	 * for resubmission
	 */
	memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
	memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));

	/* exclude fields which HW can update */
	aq_req.cq_mask.cq_err = 0;
	aq_req.cq_mask.wrptr = 0;
	aq_req.cq_mask.tail = 0;
	aq_req.cq_mask.head = 0;
	aq_req.cq_mask.avg_level = 0;
	aq_req.cq_mask.update_time = 0;
	aq_req.cq_mask.substream = 0;

	/* Context mask (cq_mask) holds mask value of fields which
	 * are changed in AQ WRITE operation.
	 * for example cq.drop = 0xa;
	 *	       cq_mask.drop = 0xff;
	 * Below logic performs '&' between cq and cq_mask so that non
	 * updated fields are masked out for request and response
	 * comparison
	 */
	for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
	     word++) {
		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
		*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
	}

	if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;

	return 0;
}

static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct nix_hw *nix_hw;
	int err, retries = 5;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

retry:
	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);

	/* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
	 * As a workaround perform CQ context read after each AQ write. If AQ
	 * read shows AQ write is not updated perform AQ write again.
	 */
	if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
		err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
		if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
			if (retries--)
				goto retry;
			else
				return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
		}
	}

	return err;
}

static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	}
	return "";
}

static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		aq_req.cq.bp_ena = 0;
		aq_req.cq_mask.bp_ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				nix_get_ctx_name(req->ctype), qidx);
		}
	}

	return err;
}

#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
	struct nix_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NIX_AQ_INSTOP_INIT)
		return 0;

	if (req->ctype == NIX_AQ_CTYPE_MCE ||
	    req->ctype == NIX_AQ_CTYPE_DYNO)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
	lock_ctx_req.qidx = req->qidx;
	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
			req->hdr.pcifunc,
			nix_get_ctx_name(req->ctype), req->qidx);
	return err;
}

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = nix_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif
/* CN10K mbox handler */
int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
				      struct nix_cn10k_aq_enq_req *req,
				      struct nix_cn10k_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
				   (struct nix_aq_enq_rsp *)rsp);
}

int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}

int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, intf, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
	if (req->npa_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
			req->npa_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
			return NIX_AF_INVAL_NPA_PF_FUNC;
	}

	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
	if (req->sso_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
			req->sso_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
			return NIX_AF_INVAL_SSO_PF_FUNC;
	}

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);

	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);

	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
				 req->rss_grps, hwctx_size, req->way_mask,
				 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Setup VLANX TPID's.
	 * Use VLAN1 for 802.1Q
	 * and VLAN0 for 802.1AD.
	 */
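	/* In the TX_CFG value below the 802.1Q TPID (0x8100) lands in
	 * bits [31:16] (VLAN1) and the 802.1AD TPID (0x88A8) in
	 * bits [15:0] (VLAN0).
	 */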
	cfg = (0x8100ULL << 16) | 0x88A8ULL;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
	 * Start from a clean value so stale bits from the TPID config
	 * above don't leak into NIX_AF_LFX_CFG when npa_func is zero.
	 */
	cfg = 0;
	if (req->npa_func)
		cfg = req->npa_func;
	if (req->sso_func)
		cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable / disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	/* Configure pkind for TX parse config */
	cfg = NPC_TX_DEF_PKIND;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);

	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (is_sdp_pfvf(pcifunc))
		intf = NIX_INTF_TYPE_SDP;

	err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
				 !!(req->flags & NIX_LF_LBK_BLK_SEL));
	if (err)
		goto free_mem;

	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	/* Configure RX VTAG Type 7 (strip) for vf vlan */
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
		    VTAGSIZE_T4 | VTAG_STRIP);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->rx_chan_base = pfvf->rx_chan_base;
	rsp->tx_chan_base = pfvf->tx_chan_base;
	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	/* Get HW supported stat count */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
	/* Get count of CQ IRQs and error IRQs supported per LF */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	rsp->qints = ((cfg >> 12) & 0xFFF);
	rsp->cints = ((cfg >> 24) & 0xFFF);
	rsp->cgx_links = hw->cgx_links;
	rsp->lbk_links = hw->lbk_links;
	rsp->sdp_links = hw->sdp_links;

	return rc;
}

int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (req->flags & NIX_LF_DISABLE_FLOWS)
		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	else
		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);

	/* Free any tx vtag def entries used by this NIX LF */
	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
		nix_free_tx_vtag_entries(rvu, pcifunc);

	nix_interface_deinit(rvu, pcifunc, nixlf);

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}

int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
					 struct nix_mark_format_cfg *req,
					 struct nix_mark_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, rc;
	u32 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	cfg = (((u32)req->offset & 0x7) << 16) |
	      (((u32)req->y_mask & 0xF) << 12) |
	      (((u32)req->y_val & 0xF) << 8) |
	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);

	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
	if (rc < 0) {
		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return NIX_AF_ERR_MARK_CFG_FAIL;
	}

	rsp->mark_format_idx = rc;
	return 0;
}

/* Handle shaper update specially for a few revisions */
static bool
handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
			    int lvl, u64 reg, u64 regval)
{
	u64 regbase, oldval, sw_xoff = 0;
	u64 dbgval, md_debug0 = 0;
	unsigned long poll_tmo;
	bool rate_reg = false;
	u32 schq;

	regbase = reg & 0xFFFF;
	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);

	/* Check for rate register */
	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);

		rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
		break;
	case NIX_TXSCH_LVL_TL2:
		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
			    regbase == NIX_AF_TL2X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL3:
		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
			    regbase == NIX_AF_TL3X_PIR(0));
		break;
	case NIX_TXSCH_LVL_TL4:
		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);

		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
			    regbase == NIX_AF_TL4X_PIR(0));
		break;
	case NIX_TXSCH_LVL_MDQ:
		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
			    regbase == NIX_AF_MDQX_PIR(0));
		break;
	}

	if (!rate_reg)
		return false;

	/* Nothing special to do when state is not toggled */
	oldval = rvu_read64(rvu, blkaddr, reg);
	if ((oldval & 0x1) == (regval & 0x1)) {
		rvu_write64(rvu, blkaddr, reg, regval);
		return true;
	}

	/* PIR/CIR disable */
	if (!(regval & 0x1)) {
		rvu_write64(rvu, blkaddr, sw_xoff, 1);
		rvu_write64(rvu, blkaddr, reg, 0);
		udelay(4);
		rvu_write64(rvu, blkaddr, sw_xoff, 0);
		return true;
	}

	/* PIR/CIR enable */
	rvu_write64(rvu, blkaddr, sw_xoff, 1);
	if (md_debug0) {
		poll_tmo = jiffies + usecs_to_jiffies(10000);
		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
		do {
			if (time_after(jiffies, poll_tmo)) {
				dev_err(rvu->dev,
					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
					nixlf, schq, lvl);
				goto exit;
			}
			usleep_range(1, 5);
			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
	}
	rvu_write64(rvu, blkaddr, reg, regval);
exit:
	rvu_write64(rvu, blkaddr, sw_xoff, 0);
	return true;
}
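
/* Summary of the enable sequence above: assert SW_XOFF to stop the
 * queue, poll MD_DEBUG0 until VLD (bit 32) is set or C_CON (bit 48)
 * clears (no meta-descriptor stuck in the pipeline), program the
 * CIR/PIR register, then de-assert SW_XOFF. Disable is simpler:
 * XOFF, clear the rate register, a short delay, then XON.
 */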

/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
				 int nixlf, int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cir_reg = 0, pir_reg = 0;
	u64 cfg;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		cir_reg = NIX_AF_TL1X_CIR(schq);
		pir_reg = 0; /* PIR not available at TL1 */
		break;
	case NIX_TXSCH_LVL_TL2:
		cir_reg = NIX_AF_TL2X_CIR(schq);
		pir_reg = NIX_AF_TL2X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		cir_reg = NIX_AF_TL3X_CIR(schq);
		pir_reg = NIX_AF_TL3X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		cir_reg = NIX_AF_TL4X_CIR(schq);
		pir_reg = NIX_AF_TL4X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_MDQ:
		cir_reg = NIX_AF_MDQX_CIR(schq);
		pir_reg = NIX_AF_MDQX_PIR(schq);
		break;
	}

	/* Shaper state toggle needs wait/poll */
	if (hw->cap.nix_shaper_toggle_wait) {
		if (cir_reg)
			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						    lvl, cir_reg, 0);
		if (pir_reg)
			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						    lvl, pir_reg, 0);
		return;
	}

	if (!cir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, cir_reg);
	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));

	if (!pir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, pir_reg);
	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
}

static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int link_level;
	int link;

	if (lvl >= hw->cap.nix_tx_aggr_lvl)
		return;

	/* Reset TL4's SDP link config */
	if (lvl == NIX_TXSCH_LVL_TL4)
		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);

	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	if (lvl != link_level)
		return;

	/* Reset TL2's CGX or LBK link config */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
1759
nix_clear_tx_xoff(struct rvu * rvu,int blkaddr,int lvl,int schq)1760 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
1761 int lvl, int schq)
1762 {
1763 struct rvu_hwinfo *hw = rvu->hw;
1764 u64 reg;
1765
1766 /* Skip this if shaping is not supported */
1767 if (!hw->cap.nix_shaping)
1768 return;
1769
1770 /* Clear level specific SW_XOFF */
1771 switch (lvl) {
1772 case NIX_TXSCH_LVL_TL1:
1773 reg = NIX_AF_TL1X_SW_XOFF(schq);
1774 break;
1775 case NIX_TXSCH_LVL_TL2:
1776 reg = NIX_AF_TL2X_SW_XOFF(schq);
1777 break;
1778 case NIX_TXSCH_LVL_TL3:
1779 reg = NIX_AF_TL3X_SW_XOFF(schq);
1780 break;
1781 case NIX_TXSCH_LVL_TL4:
1782 reg = NIX_AF_TL4X_SW_XOFF(schq);
1783 break;
1784 case NIX_TXSCH_LVL_MDQ:
1785 reg = NIX_AF_MDQX_SW_XOFF(schq);
1786 break;
1787 default:
1788 return;
1789 }
1790
1791 rvu_write64(rvu, blkaddr, reg, 0x0);
1792 }
1793
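/* Map a PF_FUNC to its transmit link index. Links are numbered with
 * all CGX/LMAC links first ((cgx_id * lmacs-per-cgx) + lmac_id),
 * then the LBK links, then the SDP link.
 */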
1794 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1795 {
1796 struct rvu_hwinfo *hw = rvu->hw;
1797 int pf = rvu_get_pf(pcifunc);
1798 u8 cgx_id = 0, lmac_id = 0;
1799
1800 if (is_afvf(pcifunc)) { /* LBK links */
1801 return hw->cgx_links;
1802 } else if (is_pf_cgxmapped(rvu, pf)) {
1803 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1804 return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1805 }
1806
1807 /* SDP link */
1808 return hw->cgx_links + hw->lbk_links;
1809 }
1810
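/* Compute the [start, end) scheduler queue range owned by a link
 * when queues are statically partitioned: CGX LMAC links come first,
 * then LBK links, then SDP.
 */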
1811 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1812 int link, int *start, int *end)
1813 {
1814 struct rvu_hwinfo *hw = rvu->hw;
1815 int pf = rvu_get_pf(pcifunc);
1816
1817 if (is_afvf(pcifunc)) { /* LBK links */
1818 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1819 *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1820 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1821 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1822 *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1823 } else { /* SDP link */
1824 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1825 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1826 *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1827 }
1828 }
1829
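/* Validate a TXSCH alloc request at one level: aggregate levels allow
 * exactly one queue; with fixed mapping the only candidate queue is
 * 'start + (pcifunc & RVU_PFVF_FUNC_MASK)'; otherwise the free count
 * and, if asked for, contiguous availability are checked.
 */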
1830 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1831 struct nix_hw *nix_hw,
1832 struct nix_txsch_alloc_req *req)
1833 {
1834 struct rvu_hwinfo *hw = rvu->hw;
1835 int schq, req_schq, free_cnt;
1836 struct nix_txsch *txsch;
1837 int link, start, end;
1838
1839 txsch = &nix_hw->txsch[lvl];
1840 req_schq = req->schq_contig[lvl] + req->schq[lvl];
1841
1842 if (!req_schq)
1843 return 0;
1844
1845 link = nix_get_tx_link(rvu, pcifunc);
1846
1847 /* For traffic aggregating scheduler level, one queue is enough */
1848 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1849 if (req_schq != 1)
1850 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1851 return 0;
1852 }
1853
1854 /* Get free SCHQ count and check if request can be accommodated */
1855 if (hw->cap.nix_fixed_txschq_mapping) {
1856 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1857 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1858 if (end <= txsch->schq.max && schq < end &&
1859 !test_bit(schq, txsch->schq.bmap))
1860 free_cnt = 1;
1861 else
1862 free_cnt = 0;
1863 } else {
1864 free_cnt = rvu_rsrc_free_count(&txsch->schq);
1865 }
1866
1867 if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1868 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1869
1870 /* If contiguous queues are needed, check for availability */
1871 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1872 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1873 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1874
1875 return 0;
1876 }
1877
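/* Do the actual bitmap allocation for one level. Aggregate levels get
 * a single queue (the link index); fixed-mapping silicon gets its
 * per-function slot; otherwise contiguous requests are served via
 * bitmap_find_next_zero_area() and the rest from individual free bits.
 */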
1878 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1879 struct nix_txsch_alloc_rsp *rsp,
1880 int lvl, int start, int end)
1881 {
1882 struct rvu_hwinfo *hw = rvu->hw;
1883 u16 pcifunc = rsp->hdr.pcifunc;
1884 int idx, schq;
1885
1886 /* For traffic aggregating levels, queue alloc is based
1887 * on the transmit link to which the PF_FUNC is mapped.
1888 */
1889 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1890 /* A single TL queue is allocated */
1891 if (rsp->schq_contig[lvl]) {
1892 rsp->schq_contig[lvl] = 1;
1893 rsp->schq_contig_list[lvl][0] = start;
1894 }
1895
1896 /* Both contig and non-contig reqs don't make sense here */
1897 if (rsp->schq_contig[lvl])
1898 rsp->schq[lvl] = 0;
1899
1900 if (rsp->schq[lvl]) {
1901 rsp->schq[lvl] = 1;
1902 rsp->schq_list[lvl][0] = start;
1903 }
1904 return;
1905 }
1906
1907 /* Adjust the queue request count if HW supports
1908 * only one queue per level configuration.
1909 */
1910 if (hw->cap.nix_fixed_txschq_mapping) {
1911 idx = pcifunc & RVU_PFVF_FUNC_MASK;
1912 schq = start + idx;
1913 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1914 rsp->schq_contig[lvl] = 0;
1915 rsp->schq[lvl] = 0;
1916 return;
1917 }
1918
1919 if (rsp->schq_contig[lvl]) {
1920 rsp->schq_contig[lvl] = 1;
1921 set_bit(schq, txsch->schq.bmap);
1922 rsp->schq_contig_list[lvl][0] = schq;
1923 rsp->schq[lvl] = 0;
1924 } else if (rsp->schq[lvl]) {
1925 rsp->schq[lvl] = 1;
1926 set_bit(schq, txsch->schq.bmap);
1927 rsp->schq_list[lvl][0] = schq;
1928 }
1929 return;
1930 }
1931
1932 /* Allocate contiguous queue indices as requested first */
1933 if (rsp->schq_contig[lvl]) {
1934 schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1935 txsch->schq.max, start,
1936 rsp->schq_contig[lvl], 0);
1937 if (schq >= end)
1938 rsp->schq_contig[lvl] = 0;
1939 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1940 set_bit(schq, txsch->schq.bmap);
1941 rsp->schq_contig_list[lvl][idx] = schq;
1942 schq++;
1943 }
1944 }
1945
1946 /* Allocate non-contiguous queue indices */
1947 if (rsp->schq[lvl]) {
1948 idx = 0;
1949 for (schq = start; schq < end; schq++) {
1950 if (!test_bit(schq, txsch->schq.bmap)) {
1951 set_bit(schq, txsch->schq.bmap);
1952 rsp->schq_list[lvl][idx++] = schq;
1953 }
1954 if (idx == rsp->schq[lvl])
1955 break;
1956 }
1957 /* Update how many were allocated */
1958 rsp->schq[lvl] = idx;
1959 }
1960 }
1961
1962 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1963 struct nix_txsch_alloc_req *req,
1964 struct nix_txsch_alloc_rsp *rsp)
1965 {
1966 struct rvu_hwinfo *hw = rvu->hw;
1967 u16 pcifunc = req->hdr.pcifunc;
1968 int link, blkaddr, rc = 0;
1969 int lvl, idx, start, end;
1970 struct nix_txsch *txsch;
1971 struct nix_hw *nix_hw;
1972 u32 *pfvf_map;
1973 int nixlf;
1974 u16 schq;
1975
1976 rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1977 if (rc)
1978 return rc;
1979
1980 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1981 if (!nix_hw)
1982 return NIX_AF_ERR_INVALID_NIXBLK;
1983
1984 mutex_lock(&rvu->rsrc_lock);
1985
1986 /* Check if request is valid as per HW capabilities
1987 * and can be accommodated.
1988 */
1989 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1990 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1991 if (rc)
1992 goto err;
1993 }
1994
1995 /* Allocate requested Tx scheduler queues */
1996 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1997 txsch = &nix_hw->txsch[lvl];
1998 pfvf_map = txsch->pfvf_map;
1999
2000 if (!req->schq[lvl] && !req->schq_contig[lvl])
2001 continue;
2002
2003 rsp->schq[lvl] = req->schq[lvl];
2004 rsp->schq_contig[lvl] = req->schq_contig[lvl];
2005
2006 link = nix_get_tx_link(rvu, pcifunc);
2007
2008 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2009 start = link;
2010 end = link;
2011 } else if (hw->cap.nix_fixed_txschq_mapping) {
2012 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
2013 } else {
2014 start = 0;
2015 end = txsch->schq.max;
2016 }
2017
2018 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
2019
2020 /* Reset queue config */
2021 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
2022 schq = rsp->schq_contig_list[lvl][idx];
2023 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2024 NIX_TXSCHQ_CFG_DONE))
2025 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2026 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2027 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2028 }
2029
2030 for (idx = 0; idx < req->schq[lvl]; idx++) {
2031 schq = rsp->schq_list[lvl][idx];
2032 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2033 NIX_TXSCHQ_CFG_DONE))
2034 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2035 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2036 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2037 }
2038 }
2039
2040 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
2041 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
2042 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
2043 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2044 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2045 goto exit;
2046 err:
2047 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
2048 exit:
2049 mutex_unlock(&rvu->rsrc_lock);
2050 return rc;
2051 }
2052
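/* Flush an SMQ so no stale packets stay queued on it. The sequence
 * below is roughly:
 *  1. Temporarily enable CGX/LMAC TX if it was disabled, so that the
 *     flush can drain.
 *  2. Set the flush and enqueue-xoff bits in NIX_AF_SMQX_CFG.
 *  3. Disable RX backpressure from the physical link, which could
 *     otherwise stall the flush.
 *  4. Poll for the flush bit to clear, then restore the link state.
 */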
2053 static int nix_smq_flush(struct rvu *rvu, int blkaddr,
2054 int smq, u16 pcifunc, int nixlf)
2055 {
2056 int pf = rvu_get_pf(pcifunc);
2057 u8 cgx_id = 0, lmac_id = 0;
2058 int err, restore_tx_en = 0;
2059 u64 cfg;
2060
2061 /* enable cgx tx if disabled */
2062 if (is_pf_cgxmapped(rvu, pf)) {
2063 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
2064 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
2065 lmac_id, true);
2066 }
2067
2068 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2069 /* Do SMQ flush and set enqueue xoff */
2070 cfg |= BIT_ULL(50) | BIT_ULL(49);
2071 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2072
2073 /* Disable backpressure from physical link,
2074 * otherwise SMQ flush may stall.
2075 */
2076 rvu_cgx_enadis_rx_bp(rvu, pf, false);
2077
2078 /* Wait for flush to complete */
2079 err = rvu_poll_reg(rvu, blkaddr,
2080 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
2081 if (err)
2082 dev_err(rvu->dev,
2083 "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
2084
2085 rvu_cgx_enadis_rx_bp(rvu, pf, true);
2086 /* restore cgx tx state */
2087 if (restore_tx_en)
2088 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
2089 return err;
2090 }
2091
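/* Free all Tx scheduler queues owned by 'pcifunc'. The teardown order
 * matters: link configs and SW_XOFFs are cleared first, SMQs are
 * flushed next, and only then are queues returned to the free pool,
 * followed by an NDC-TX sync for this LF.
 */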
2092 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
2093 {
2094 int blkaddr, nixlf, lvl, schq, err;
2095 struct rvu_hwinfo *hw = rvu->hw;
2096 struct nix_txsch *txsch;
2097 struct nix_hw *nix_hw;
2098 u16 map_func;
2099
2100 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2101 if (blkaddr < 0)
2102 return NIX_AF_ERR_AF_LF_INVALID;
2103
2104 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2105 if (!nix_hw)
2106 return NIX_AF_ERR_INVALID_NIXBLK;
2107
2108 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2109 if (nixlf < 0)
2110 return NIX_AF_ERR_AF_LF_INVALID;
2111
2112 /* Disable TL2/3 queue links and all XOFF's before SMQ flush */
2113 mutex_lock(&rvu->rsrc_lock);
2114 for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2115 txsch = &nix_hw->txsch[lvl];
2116
2117 if (lvl >= hw->cap.nix_tx_aggr_lvl)
2118 continue;
2119
2120 for (schq = 0; schq < txsch->schq.max; schq++) {
2121 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2122 continue;
2123 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2124 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2125 }
2126 }
2127 nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
2128 nix_get_tx_link(rvu, pcifunc));
2129
2130 /* On PF cleanup, clear cfg done flag as
2131 * PF would have changed default config.
2132 */
2133 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
2134 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
2135 schq = nix_get_tx_link(rvu, pcifunc);
2136 /* Do not clear pcifunc in txsch->pfvf_map[schq] because
2137 * VF might be using this TL1 queue
2138 */
2139 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
2140 txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
2141 }
2142
2143 /* Flush SMQs */
2144 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2145 for (schq = 0; schq < txsch->schq.max; schq++) {
2146 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2147 continue;
2148 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2149 }
2150
2151 /* Now free scheduler queues to free pool */
2152 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2153 /* TLs above aggregation level are shared across a PF
2154 * and its VFs, hence skip freeing them.
2155 */
2156 if (lvl >= hw->cap.nix_tx_aggr_lvl)
2157 continue;
2158
2159 txsch = &nix_hw->txsch[lvl];
2160 for (schq = 0; schq < txsch->schq.max; schq++) {
2161 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2162 continue;
2163 rvu_free_rsrc(&txsch->schq, schq);
2164 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2165 }
2166 }
2167 mutex_unlock(&rvu->rsrc_lock);
2168
2169 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
2170 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
2171 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
2172 if (err)
2173 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
2174
2175 return 0;
2176 }
2177
2178 static int nix_txschq_free_one(struct rvu *rvu,
2179 struct nix_txsch_free_req *req)
2180 {
2181 struct rvu_hwinfo *hw = rvu->hw;
2182 u16 pcifunc = req->hdr.pcifunc;
2183 int lvl, schq, nixlf, blkaddr;
2184 struct nix_txsch *txsch;
2185 struct nix_hw *nix_hw;
2186 u32 *pfvf_map;
2187 int rc;
2188
2189 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2190 if (blkaddr < 0)
2191 return NIX_AF_ERR_AF_LF_INVALID;
2192
2193 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2194 if (!nix_hw)
2195 return NIX_AF_ERR_INVALID_NIXBLK;
2196
2197 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2198 if (nixlf < 0)
2199 return NIX_AF_ERR_AF_LF_INVALID;
2200
2201 lvl = req->schq_lvl;
2202 schq = req->schq;
2203 txsch = &nix_hw->txsch[lvl];
2204
2205 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
2206 return 0;
2207
2208 pfvf_map = txsch->pfvf_map;
2209 mutex_lock(&rvu->rsrc_lock);
2210
2211 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
2212 rc = NIX_AF_ERR_TLX_INVALID;
2213 goto err;
2214 }
2215
2216 /* Clear SW_XOFF of this resource only.
2217 * For SMQ level, all path XOFF's
2218 * need to be cleared by the user
2219 */
2220 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2221
2222 /* Flush if it is an SMQ. Onus of disabling
2223 * TL2/3 queue links before SMQ flush is on user
2224 */
2225 if (lvl == NIX_TXSCH_LVL_SMQ &&
2226 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
2227 rc = NIX_AF_SMQ_FLUSH_FAILED;
2228 goto err;
2229 }
2230
2231 /* Free the resource */
2232 rvu_free_rsrc(&txsch->schq, schq);
2233 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2234 mutex_unlock(&rvu->rsrc_lock);
2235 return 0;
2236 err:
2237 mutex_unlock(&rvu->rsrc_lock);
2238 return rc;
2239 }
2240
2241 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
2242 struct nix_txsch_free_req *req,
2243 struct msg_rsp *rsp)
2244 {
2245 if (req->flags & TXSCHQ_FREE_ALL)
2246 return nix_txschq_free(rvu, req->hdr.pcifunc);
2247 else
2248 return nix_txschq_free_one(rvu, req);
2249 }
2250
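/* Sanity check a TXSCHQ register write: the register must exist at
 * the given level, the target queue must belong to the requester, and
 * when a *_PARENT register is written the parent queue encoded in
 * regval[24:16] must belong to the requester at the level above.
 */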
2251 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
2252 int lvl, u64 reg, u64 regval)
2253 {
2254 u64 regbase = reg & 0xFFFF;
2255 u16 schq, parent;
2256
2257 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
2258 return false;
2259
2260 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2261 /* Check if this schq belongs to this PF/VF or not */
2262 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
2263 return false;
2264
2265 parent = (regval >> 16) & 0x1FF;
2266 /* Validate MDQ's TL4 parent */
2267 if (regbase == NIX_AF_MDQX_PARENT(0) &&
2268 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
2269 return false;
2270
2271 /* Validate TL4's TL3 parent */
2272 if (regbase == NIX_AF_TL4X_PARENT(0) &&
2273 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
2274 return false;
2275
2276 /* Validate TL3's TL2 parent */
2277 if (regbase == NIX_AF_TL3X_PARENT(0) &&
2278 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
2279 return false;
2280
2281 /* Validate TL2's TL1 parent */
2282 if (regbase == NIX_AF_TL2X_PARENT(0) &&
2283 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
2284 return false;
2285
2286 return true;
2287 }
2288
2289 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
2290 {
2291 u64 regbase;
2292
2293 if (hw->cap.nix_shaping)
2294 return true;
2295
2296 /* If shaping and coloring are not supported, then
2297 * *_CIR and *_PIR registers should not be configured.
2298 */
2299 regbase = reg & 0xFFFF;
2300
2301 switch (lvl) {
2302 case NIX_TXSCH_LVL_TL1:
2303 if (regbase == NIX_AF_TL1X_CIR(0))
2304 return false;
2305 break;
2306 case NIX_TXSCH_LVL_TL2:
2307 if (regbase == NIX_AF_TL2X_CIR(0) ||
2308 regbase == NIX_AF_TL2X_PIR(0))
2309 return false;
2310 break;
2311 case NIX_TXSCH_LVL_TL3:
2312 if (regbase == NIX_AF_TL3X_CIR(0) ||
2313 regbase == NIX_AF_TL3X_PIR(0))
2314 return false;
2315 break;
2316 case NIX_TXSCH_LVL_TL4:
2317 if (regbase == NIX_AF_TL4X_CIR(0) ||
2318 regbase == NIX_AF_TL4X_PIR(0))
2319 return false;
2320 break;
2321 case NIX_TXSCH_LVL_MDQ:
2322 if (regbase == NIX_AF_MDQX_CIR(0) ||
2323 regbase == NIX_AF_MDQX_PIR(0))
2324 return false;
2325 break;
2326 }
2327 return true;
2328 }
2329
2330 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
2331 u16 pcifunc, int blkaddr)
2332 {
2333 u32 *pfvf_map;
2334 int schq;
2335
2336 schq = nix_get_tx_link(rvu, pcifunc);
2337 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
2338 /* Skip if PF has already done the config */
2339 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
2340 return;
2341 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
2342 (TXSCH_TL1_DFLT_RR_PRIO << 1));
2343
2344 /* On OcteonTx2 the config was in bytes, on newer silicons
2345 * it's changed to weight.
2346 */
2347 if (!rvu->hw->cap.nix_common_dwrr_mtu)
2348 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2349 TXSCH_TL1_DFLT_RR_QTM);
2350 else
2351 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2352 CN10K_MAX_DWRR_WEIGHT);
2353
2354 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
2355 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
2356 }
2357
2358 /* Register offset - [15:0]
2359 * Scheduler Queue number - [25:16]
2360 */
2361 #define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0)
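/* For illustration (a sketch; exact offsets come from the register
 * map): for a request touching TL2 queue 5's scheduling register,
 *
 *	reg = NIX_AF_TL2X_SCHEDULE(5);
 *	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);	// -> 5
 *	regbase = reg & 0xFFFF;				// register offset
 */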
2362
2363 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
2364 int blkaddr, struct nix_txschq_config *req,
2365 struct nix_txschq_config *rsp)
2366 {
2367 u16 pcifunc = req->hdr.pcifunc;
2368 int idx, schq;
2369 u64 reg;
2370
2371 for (idx = 0; idx < req->num_regs; idx++) {
2372 reg = req->reg[idx];
2373 reg &= NIX_TX_SCHQ_MASK;
2374 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2375 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
2376 !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
2377 return NIX_AF_INVAL_TXSCHQ_CFG;
2378 rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
2379 }
2380 rsp->lvl = req->lvl;
2381 rsp->num_regs = req->num_regs;
2382 return 0;
2383 }
2384
2385 static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
2386 u16 pcifunc, struct nix_txsch *txsch)
2387 {
2388 struct rvu_hwinfo *hw = rvu->hw;
2389 int lbk_link_start, lbk_links;
2390 u8 pf = rvu_get_pf(pcifunc);
2391 int schq;
2392
2393 if (!is_pf_cgxmapped(rvu, pf))
2394 return;
2395
2396 lbk_link_start = hw->cgx_links;
2397
2398 for (schq = 0; schq < txsch->schq.max; schq++) {
2399 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2400 continue;
2401 /* Enable all LBK links with channel 63 by default so that
2402 * packets can be sent to LBK with a NPC TX MCAM rule
2403 */
2404 lbk_links = hw->lbk_links;
2405 while (lbk_links--)
2406 rvu_write64(rvu, blkaddr,
2407 NIX_AF_TL3_TL2X_LINKX_CFG(schq,
2408 lbk_link_start +
2409 lbk_links),
2410 BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
2411 }
2412 }
2413
2414 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
2415 struct nix_txschq_config *req,
2416 struct nix_txschq_config *rsp)
2417 {
2418 u64 reg, val, regval, schq_regbase, val_mask;
2419 struct rvu_hwinfo *hw = rvu->hw;
2420 u16 pcifunc = req->hdr.pcifunc;
2421 struct nix_txsch *txsch;
2422 struct nix_hw *nix_hw;
2423 int blkaddr, idx, err;
2424 int nixlf, schq;
2425 u32 *pfvf_map;
2426
2427 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
2428 req->num_regs > MAX_REGS_PER_MBOX_MSG)
2429 return NIX_AF_INVAL_TXSCHQ_CFG;
2430
2431 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2432 if (err)
2433 return err;
2434
2435 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2436 if (!nix_hw)
2437 return NIX_AF_ERR_INVALID_NIXBLK;
2438
2439 if (req->read)
2440 return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
2441
2442 txsch = &nix_hw->txsch[req->lvl];
2443 pfvf_map = txsch->pfvf_map;
2444
2445 if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2446 pcifunc & RVU_PFVF_FUNC_MASK) {
2447 mutex_lock(&rvu->rsrc_lock);
2448 if (req->lvl == NIX_TXSCH_LVL_TL1)
2449 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2450 mutex_unlock(&rvu->rsrc_lock);
2451 return 0;
2452 }
2453
2454 for (idx = 0; idx < req->num_regs; idx++) {
2455 reg = req->reg[idx];
2456 reg &= NIX_TX_SCHQ_MASK;
2457 regval = req->regval[idx];
2458 schq_regbase = reg & 0xFFFF;
2459 val_mask = req->regval_mask[idx];
2460
2461 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2462 txsch->lvl, reg, regval))
2463 return NIX_AF_INVAL_TXSCHQ_CFG;
2464
2465 /* Check if shaping and coloring is supported */
2466 if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2467 continue;
2468
2469 val = rvu_read64(rvu, blkaddr, reg);
2470 regval = (val & val_mask) | (regval & ~val_mask);
2471
2472 /* Handle shaping state toggle specially */
2473 if (hw->cap.nix_shaper_toggle_wait &&
2474 handle_txschq_shaper_update(rvu, blkaddr, nixlf,
2475 req->lvl, reg, regval))
2476 continue;
2477
2478 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2479 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2480 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2481 pcifunc, 0);
2482 regval &= ~(0x7FULL << 24);
2483 regval |= ((u64)nixlf << 24);
2484 }
2485
2486 /* Clear 'BP_ENA' config, if it's not allowed */
2487 if (!hw->cap.nix_tx_link_bp) {
2488 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2489 (schq_regbase & 0xFF00) ==
2490 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2491 regval &= ~BIT_ULL(13);
2492 }
2493
2494 /* Mark config as done for TL1 by PF */
2495 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2496 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2497 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2498 mutex_lock(&rvu->rsrc_lock);
2499 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2500 NIX_TXSCHQ_CFG_DONE);
2501 mutex_unlock(&rvu->rsrc_lock);
2502 }
2503
2504 /* SMQ flush is special, hence split the register write such
2505 * that the flush is done first and the rest of the bits are written later.
2506 */
2507 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2508 (regval & BIT_ULL(49))) {
2509 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2510 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2511 regval &= ~BIT_ULL(49);
2512 }
2513 rvu_write64(rvu, blkaddr, reg, regval);
2514 }
2515
2516 rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
2517 &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
2518 return 0;
2519 }
2520
2521 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2522 struct nix_vtag_config *req)
2523 {
2524 u64 regval = req->vtag_size;
2525
2526 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2527 req->vtag_size > VTAGSIZE_T8)
2528 return -EINVAL;
2529
2530 /* RX VTAG Type 7 is reserved for VF VLAN */
2531 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2532 return NIX_AF_ERR_RX_VTAG_INUSE;
2533
2534 if (req->rx.capture_vtag)
2535 regval |= BIT_ULL(5);
2536 if (req->rx.strip_vtag)
2537 regval |= BIT_ULL(4);
2538
2539 rvu_write64(rvu, blkaddr,
2540 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2541 return 0;
2542 }
2543
2544 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2545 u16 pcifunc, int index)
2546 {
2547 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2548 struct nix_txvlan *vlan;
2549
2550 if (!nix_hw)
2551 return NIX_AF_ERR_INVALID_NIXBLK;
2552
2553 vlan = &nix_hw->txvlan;
2554 if (vlan->entry2pfvf_map[index] != pcifunc)
2555 return NIX_AF_ERR_PARAM;
2556
2557 rvu_write64(rvu, blkaddr,
2558 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2559 rvu_write64(rvu, blkaddr,
2560 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2561
2562 vlan->entry2pfvf_map[index] = 0;
2563 rvu_free_rsrc(&vlan->rsrc, index);
2564
2565 return 0;
2566 }
2567
2568 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2569 {
2570 struct nix_txvlan *vlan;
2571 struct nix_hw *nix_hw;
2572 int index, blkaddr;
2573
2574 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2575 if (blkaddr < 0)
2576 return;
2577
2578 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2579 if (!nix_hw)
2580 return;
2581
2582 vlan = &nix_hw->txvlan;
2583
2584 mutex_lock(&vlan->rsrc_lock);
2585 /* Scan all the entries and free the ones mapped to 'pcifunc' */
2586 for (index = 0; index < vlan->rsrc.max; index++) {
2587 if (vlan->entry2pfvf_map[index] == pcifunc)
2588 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2589 }
2590 mutex_unlock(&vlan->rsrc_lock);
2591 }
2592
2593 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2594 u64 vtag, u8 size)
2595 {
2596 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2597 struct nix_txvlan *vlan;
2598 u64 regval;
2599 int index;
2600
2601 if (!nix_hw)
2602 return NIX_AF_ERR_INVALID_NIXBLK;
2603
2604 vlan = &nix_hw->txvlan;
2605
2606 mutex_lock(&vlan->rsrc_lock);
2607
2608 index = rvu_alloc_rsrc(&vlan->rsrc);
2609 if (index < 0) {
2610 mutex_unlock(&vlan->rsrc_lock);
2611 return index;
2612 }
2613
2614 mutex_unlock(&vlan->rsrc_lock);
2615
2616 regval = size ? vtag : vtag << 32;
2617
2618 rvu_write64(rvu, blkaddr,
2619 NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2620 rvu_write64(rvu, blkaddr,
2621 NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2622
2623 return index;
2624 }
2625
2626 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2627 struct nix_vtag_config *req)
2628 {
2629 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2630 u16 pcifunc = req->hdr.pcifunc;
2631 int idx0 = req->tx.vtag0_idx;
2632 int idx1 = req->tx.vtag1_idx;
2633 struct nix_txvlan *vlan;
2634 int err = 0;
2635
2636 if (!nix_hw)
2637 return NIX_AF_ERR_INVALID_NIXBLK;
2638
2639 vlan = &nix_hw->txvlan;
2640 if (req->tx.free_vtag0 && req->tx.free_vtag1)
2641 if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2642 vlan->entry2pfvf_map[idx1] != pcifunc)
2643 return NIX_AF_ERR_PARAM;
2644
2645 mutex_lock(&vlan->rsrc_lock);
2646
2647 if (req->tx.free_vtag0) {
2648 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2649 if (err)
2650 goto exit;
2651 }
2652
2653 if (req->tx.free_vtag1)
2654 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2655
2656 exit:
2657 mutex_unlock(&vlan->rsrc_lock);
2658 return err;
2659 }
2660
2661 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2662 struct nix_vtag_config *req,
2663 struct nix_vtag_config_rsp *rsp)
2664 {
2665 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2666 struct nix_txvlan *vlan;
2667 u16 pcifunc = req->hdr.pcifunc;
2668
2669 if (!nix_hw)
2670 return NIX_AF_ERR_INVALID_NIXBLK;
2671
2672 vlan = &nix_hw->txvlan;
2673 if (req->tx.cfg_vtag0) {
2674 rsp->vtag0_idx =
2675 nix_tx_vtag_alloc(rvu, blkaddr,
2676 req->tx.vtag0, req->vtag_size);
2677
2678 if (rsp->vtag0_idx < 0)
2679 return NIX_AF_ERR_TX_VTAG_NOSPC;
2680
2681 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2682 }
2683
2684 if (req->tx.cfg_vtag1) {
2685 rsp->vtag1_idx =
2686 nix_tx_vtag_alloc(rvu, blkaddr,
2687 req->tx.vtag1, req->vtag_size);
2688
2689 if (rsp->vtag1_idx < 0)
2690 goto err_free;
2691
2692 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2693 }
2694
2695 return 0;
2696
2697 err_free:
2698 if (req->tx.cfg_vtag0)
2699 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2700
2701 return NIX_AF_ERR_TX_VTAG_NOSPC;
2702 }
2703
2704 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2705 struct nix_vtag_config *req,
2706 struct nix_vtag_config_rsp *rsp)
2707 {
2708 u16 pcifunc = req->hdr.pcifunc;
2709 int blkaddr, nixlf, err;
2710
2711 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2712 if (err)
2713 return err;
2714
2715 if (req->cfg_type) {
2716 /* rx vtag configuration */
2717 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2718 if (err)
2719 return NIX_AF_ERR_PARAM;
2720 } else {
2721 /* tx vtag configuration */
2722 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2723 (req->tx.free_vtag0 || req->tx.free_vtag1))
2724 return NIX_AF_ERR_PARAM;
2725
2726 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2727 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2728
2729 if (req->tx.free_vtag0 || req->tx.free_vtag1)
2730 return nix_tx_vtag_decfg(rvu, blkaddr, req);
2731 }
2732
2733 return 0;
2734 }
2735
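/* Write one multicast/mirror entry (MCE) via the NIX admin queue.
 * Each MCE points at a PF_FUNC and at the next entry of the
 * replication chain; 'eol' terminates the chain.
 */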
2736 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2737 int mce, u8 op, u16 pcifunc, int next, bool eol)
2738 {
2739 struct nix_aq_enq_req aq_req;
2740 int err;
2741
2742 aq_req.hdr.pcifunc = 0;
2743 aq_req.ctype = NIX_AQ_CTYPE_MCE;
2744 aq_req.op = op;
2745 aq_req.qidx = mce;
2746
2747 /* Use RSS with RSS index 0 */
2748 aq_req.mce.op = 1;
2749 aq_req.mce.index = 0;
2750 aq_req.mce.eol = eol;
2751 aq_req.mce.pf_func = pcifunc;
2752 aq_req.mce.next = next;
2753
2754 /* All fields valid */
2755 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
2756
2757 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2758 if (err) {
2759 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2760 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2761 return err;
2762 }
2763 return 0;
2764 }
2765
2766 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2767 u16 pcifunc, bool add)
2768 {
2769 struct mce *mce, *tail = NULL;
2770 bool delete = false;
2771
2772 /* Scan through the current list */
2773 hlist_for_each_entry(mce, &mce_list->head, node) {
2774 /* If already exists, then delete */
2775 if (mce->pcifunc == pcifunc && !add) {
2776 delete = true;
2777 break;
2778 } else if (mce->pcifunc == pcifunc && add) {
2779 /* entry already exists */
2780 return 0;
2781 }
2782 tail = mce;
2783 }
2784
2785 if (delete) {
2786 hlist_del(&mce->node);
2787 kfree(mce);
2788 mce_list->count--;
2789 return 0;
2790 }
2791
2792 if (!add)
2793 return 0;
2794
2795 /* Add a new one to the list, at the tail */
2796 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2797 if (!mce)
2798 return -ENOMEM;
2799 mce->pcifunc = pcifunc;
2800 if (!tail)
2801 hlist_add_head(&mce->node, &mce_list->head);
2802 else
2803 hlist_add_behind(&mce->node, &tail->node);
2804 mce_list->count++;
2805 return 0;
2806 }
2807
2808 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
2809 struct nix_mce_list *mce_list,
2810 int mce_idx, int mcam_index, bool add)
2811 {
2812 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
2813 struct npc_mcam *mcam = &rvu->hw->mcam;
2814 struct nix_mcast *mcast;
2815 struct nix_hw *nix_hw;
2816 struct mce *mce;
2817
2818 if (!mce_list)
2819 return -EINVAL;
2820
2821 /* Get this PF/VF func's MCE index */
2822 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2823
2824 if (idx > (mce_idx + mce_list->max)) {
2825 dev_err(rvu->dev,
2826 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2827 __func__, idx, mce_list->max,
2828 pcifunc >> RVU_PFVF_PF_SHIFT);
2829 return -EINVAL;
2830 }
2831
2832 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
2833 if (err)
2834 return err;
2835
2836 mcast = &nix_hw->mcast;
2837 mutex_lock(&mcast->mce_lock);
2838
2839 err = nix_update_mce_list_entry(mce_list, pcifunc, add);
2840 if (err)
2841 goto end;
2842
2843 /* Disable MCAM entry in NPC */
2844 if (!mce_list->count) {
2845 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2846 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
2847 goto end;
2848 }
2849
2850 /* Dump the updated list to HW */
2851 idx = mce_idx;
2852 last_idx = idx + mce_list->count - 1;
2853 hlist_for_each_entry(mce, &mce_list->head, node) {
2854 if (idx > last_idx)
2855 break;
2856
2857 next_idx = idx + 1;
2858 /* EOL should be set in last MCE */
2859 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
2860 mce->pcifunc, next_idx,
2861 next_idx > last_idx);
2862 if (err)
2863 goto end;
2864 idx++;
2865 }
2866
2867 end:
2868 mutex_unlock(&mcast->mce_lock);
2869 return err;
2870 }
2871
2872 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
2873 struct nix_mce_list **mce_list, int *mce_idx)
2874 {
2875 struct rvu_hwinfo *hw = rvu->hw;
2876 struct rvu_pfvf *pfvf;
2877
2878 if (!hw->cap.nix_rx_multicast ||
2879 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
2880 *mce_list = NULL;
2881 *mce_idx = 0;
2882 return;
2883 }
2884
2885 /* Get this PF/VF func's MCE index */
2886 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2887
2888 if (type == NIXLF_BCAST_ENTRY) {
2889 *mce_list = &pfvf->bcast_mce_list;
2890 *mce_idx = pfvf->bcast_mce_idx;
2891 } else if (type == NIXLF_ALLMULTI_ENTRY) {
2892 *mce_list = &pfvf->mcast_mce_list;
2893 *mce_idx = pfvf->mcast_mce_idx;
2894 } else if (type == NIXLF_PROMISC_ENTRY) {
2895 *mce_list = &pfvf->promisc_mce_list;
2896 *mce_idx = pfvf->promisc_mce_idx;
2897 } else {
2898 *mce_list = NULL;
2899 *mce_idx = 0;
2900 }
2901 }
2902
2903 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
2904 int type, bool add)
2905 {
2906 int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
2907 struct npc_mcam *mcam = &rvu->hw->mcam;
2908 struct rvu_hwinfo *hw = rvu->hw;
2909 struct nix_mce_list *mce_list;
2910 int pf;
2911
2912 /* skip multicast pkt replication for AF's VFs & SDP links */
2913 if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
2914 return 0;
2915
2916 if (!hw->cap.nix_rx_multicast)
2917 return 0;
2918
2919 pf = rvu_get_pf(pcifunc);
2920 if (!is_pf_cgxmapped(rvu, pf))
2921 return 0;
2922
2923 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2924 if (blkaddr < 0)
2925 return -EINVAL;
2926
2927 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2928 if (nixlf < 0)
2929 return -EINVAL;
2930
2931 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
2932
2933 mcam_index = npc_get_nixlf_mcam_index(mcam,
2934 pcifunc & ~RVU_PFVF_FUNC_MASK,
2935 nixlf, type);
2936 err = nix_update_mce_list(rvu, pcifunc, mce_list,
2937 mce_idx, mcam_index, add);
2938 return err;
2939 }
2940
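/* Pre-populate broadcast, allmulti and promisc MCE lists for every
 * CGX mapped PF and its VFs. Dummy entries are installed with AQ op
 * INIT so that later updates can always use AQ op WRITE.
 */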
2941 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2942 {
2943 struct nix_mcast *mcast = &nix_hw->mcast;
2944 int err, pf, numvfs, idx;
2945 struct rvu_pfvf *pfvf;
2946 u16 pcifunc;
2947 u64 cfg;
2948
2949 /* Skip PF0 (i.e AF) */
2950 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2951 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2952 /* If PF is not enabled, nothing to do */
2953 if (!((cfg >> 20) & 0x01))
2954 continue;
2955 /* Get numVFs attached to this PF */
2956 numvfs = (cfg >> 12) & 0xFF;
2957
2958 pfvf = &rvu->pf[pf];
2959
2960 /* Is this NIX0/1 block mapped to the PF? */
2961 if (pfvf->nix_blkaddr != nix_hw->blkaddr)
2962 continue;
2963
2964 /* save start idx of broadcast mce list */
2965 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2966 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2967
2968 /* save start idx of multicast mce list */
2969 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2970 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
2971
2972 /* save the start idx of promisc mce list */
2973 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2974 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
2975
2976 for (idx = 0; idx < (numvfs + 1); idx++) {
2977 /* idx-0 is for PF, followed by VFs */
2978 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2979 pcifunc |= idx;
2980 /* Add dummy entries now, so that we don't have to check
2981 * whether AQ_OP should be INIT/WRITE later on.
2982 * Will be updated when a NIXLF is attached/detached to
2983 * these PF/VFs.
2984 */
2985 err = nix_blk_setup_mce(rvu, nix_hw,
2986 pfvf->bcast_mce_idx + idx,
2987 NIX_AQ_INSTOP_INIT,
2988 pcifunc, 0, true);
2989 if (err)
2990 return err;
2991
2992 /* add dummy entries to multicast mce list */
2993 err = nix_blk_setup_mce(rvu, nix_hw,
2994 pfvf->mcast_mce_idx + idx,
2995 NIX_AQ_INSTOP_INIT,
2996 pcifunc, 0, true);
2997 if (err)
2998 return err;
2999
3000 /* add dummy entries to promisc mce list */
3001 err = nix_blk_setup_mce(rvu, nix_hw,
3002 pfvf->promisc_mce_idx + idx,
3003 NIX_AQ_INSTOP_INIT,
3004 pcifunc, 0, true);
3005 if (err)
3006 return err;
3007 }
3008 }
3009 return 0;
3010 }
3011
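/* Set up RX multicast/mirror replication contexts and buffers.
 * A note on the sizing arithmetic below: the MCE table entry count is
 * 256 << MC_TBL_SIZE (e.g. 256 << 1 = 512 entries for the 512-entry
 * table size), and the replication buffer count is 8 << MC_BUF_CNT.
 */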
3012 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3013 {
3014 struct nix_mcast *mcast = &nix_hw->mcast;
3015 struct rvu_hwinfo *hw = rvu->hw;
3016 int err, size;
3017
3018 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
3019 size = (1ULL << size);
3020
3021 /* Alloc memory for multicast/mirror replication entries */
3022 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
3023 (256UL << MC_TBL_SIZE), size);
3024 if (err)
3025 return -ENOMEM;
3026
3027 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
3028 (u64)mcast->mce_ctx->iova);
3029
3030 /* Set max list length equal to max no of VFs per PF + PF itself */
3031 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
3032 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
3033
3034 /* Alloc memory for multicast replication buffers */
3035 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
3036 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
3037 (8UL << MC_BUF_CNT), size);
3038 if (err)
3039 return -ENOMEM;
3040
3041 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
3042 (u64)mcast->mcast_buf->iova);
3043
3044 /* Alloc pkind for NIX internal RX multicast/mirror replay */
3045 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
3046
3047 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
3048 BIT_ULL(63) | (mcast->replay_pkind << 24) |
3049 BIT_ULL(20) | MC_BUF_CNT);
3050
3051 mutex_init(&mcast->mce_lock);
3052
3053 return nix_setup_mce_tables(rvu, nix_hw);
3054 }
3055
3056 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
3057 {
3058 struct nix_txvlan *vlan = &nix_hw->txvlan;
3059 int err;
3060
3061 /* Allocate resource bitmap for tx vtag def registers */
3062 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
3063 err = rvu_alloc_bitmap(&vlan->rsrc);
3064 if (err)
3065 return -ENOMEM;
3066
3067 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
3068 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
3069 sizeof(u16), GFP_KERNEL);
3070 if (!vlan->entry2pfvf_map)
3071 goto free_mem;
3072
3073 mutex_init(&vlan->rsrc_lock);
3074 return 0;
3075
3076 free_mem:
3077 kfree(vlan->rsrc.bmap);
3078 return -ENOMEM;
3079 }
3080
3081 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3082 {
3083 struct nix_txsch *txsch;
3084 int err, lvl, schq;
3085 u64 cfg, reg;
3086
3087 /* Get scheduler queue count of each type and alloc
3088 * bitmap for each for alloc/free/attach operations.
3089 */
3090 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3091 txsch = &nix_hw->txsch[lvl];
3092 txsch->lvl = lvl;
3093 switch (lvl) {
3094 case NIX_TXSCH_LVL_SMQ:
3095 reg = NIX_AF_MDQ_CONST;
3096 break;
3097 case NIX_TXSCH_LVL_TL4:
3098 reg = NIX_AF_TL4_CONST;
3099 break;
3100 case NIX_TXSCH_LVL_TL3:
3101 reg = NIX_AF_TL3_CONST;
3102 break;
3103 case NIX_TXSCH_LVL_TL2:
3104 reg = NIX_AF_TL2_CONST;
3105 break;
3106 case NIX_TXSCH_LVL_TL1:
3107 reg = NIX_AF_TL1_CONST;
3108 break;
3109 }
3110 cfg = rvu_read64(rvu, blkaddr, reg);
3111 txsch->schq.max = cfg & 0xFFFF;
3112 err = rvu_alloc_bitmap(&txsch->schq);
3113 if (err)
3114 return err;
3115
3116 /* Allocate memory for scheduler queues to
3117 * PF/VF pcifunc mapping info.
3118 */
3119 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
3120 sizeof(u32), GFP_KERNEL);
3121 if (!txsch->pfvf_map)
3122 return -ENOMEM;
3123 for (schq = 0; schq < txsch->schq.max; schq++)
3124 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
3125 }
3126
3127 /* Setup a default value of 8192 as DWRR MTU */
3128 if (rvu->hw->cap.nix_common_dwrr_mtu) {
3129 rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU,
3130 convert_bytes_to_dwrr_mtu(8192));
3131 rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU,
3132 convert_bytes_to_dwrr_mtu(8192));
3133 }
3134
3135 return 0;
3136 }
3137
3138 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
3139 int blkaddr, u32 cfg)
3140 {
3141 int fmt_idx;
3142
3143 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
3144 if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
3145 return fmt_idx;
3146 }
3147 if (fmt_idx >= nix_hw->mark_format.total)
3148 return -ERANGE;
3149
3150 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
3151 nix_hw->mark_format.cfg[fmt_idx] = cfg;
3152 nix_hw->mark_format.in_use++;
3153 return fmt_idx;
3154 }
3155
3156 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
3157 int blkaddr)
3158 {
3159 u64 cfgs[] = {
3160 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
3161 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
3162 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
3163 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
3164 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
3165 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
3166 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
3167 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
3168 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
3169 };
3170 int i, rc;
3171 u64 total;
3172
3173 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
3174 nix_hw->mark_format.total = (u8)total;
3175 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
3176 GFP_KERNEL);
3177 if (!nix_hw->mark_format.cfg)
3178 return -ENOMEM;
3179 for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
3180 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
3181 if (rc < 0)
3182 dev_err(rvu->dev, "Err %d in setup mark format %d\n",
3183 rc, i);
3184 }
3185
3186 return 0;
3187 }
3188
3189 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3190 {
3191 /* CN10K supports LBK FIFO size 72 KB */
3192 if (rvu->hw->lbk_bufsize == 0x12000)
3193 *max_mtu = CN10K_LBK_LINK_MAX_FRS;
3194 else
3195 *max_mtu = NIC_HW_MAX_FRS;
3196 }
3197
3198 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3199 {
3200 /* RPM supports FIFO len 128 KB */
3201 if (rvu_cgx_get_fifolen(rvu) == 0x20000)
3202 *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
3203 else
3204 *max_mtu = NIC_HW_MAX_FRS;
3205 }
3206
3207 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
3208 struct nix_hw_info *rsp)
3209 {
3210 u16 pcifunc = req->hdr.pcifunc;
3211 u64 dwrr_mtu;
3212 int blkaddr;
3213
3214 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3215 if (blkaddr < 0)
3216 return NIX_AF_ERR_AF_LF_INVALID;
3217
3218 if (is_afvf(pcifunc))
3219 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
3220 else
3221 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
3222
3223 rsp->min_mtu = NIC_HW_MIN_FRS;
3224
3225 if (!rvu->hw->cap.nix_common_dwrr_mtu) {
3226 /* Return '1' on OTx2 */
3227 rsp->rpm_dwrr_mtu = 1;
3228 rsp->sdp_dwrr_mtu = 1;
3229 return 0;
3230 }
3231
3232 dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
3233 rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3234
3235 dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU);
3236 rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3237
3238 return 0;
3239 }
3240
3241 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
3242 struct msg_rsp *rsp)
3243 {
3244 u16 pcifunc = req->hdr.pcifunc;
3245 int i, nixlf, blkaddr, err;
3246 u64 stats;
3247
3248 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3249 if (err)
3250 return err;
3251
3252 /* Get stats count supported by HW */
3253 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
3254
3255 /* Reset tx stats */
3256 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
3257 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
3258
3259 /* Reset rx stats */
3260 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
3261 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
3262
3263 return 0;
3264 }
3265
3266 /* Returns the ALG index to be set into NPC_RX_ACTION */
3267 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
3268 {
3269 int i;
3270
3271 /* Scan over existing algo entries to find a match */
3272 for (i = 0; i < nix_hw->flowkey.in_use; i++)
3273 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
3274 return i;
3275
3276 return -ERANGE;
3277 }
3278
3279 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
3280 {
3281 int idx, nr_field, key_off, field_marker, keyoff_marker;
3282 int max_key_off, max_bit_pos, group_member;
3283 struct nix_rx_flowkey_alg *field;
3284 struct nix_rx_flowkey_alg tmp;
3285 u32 key_type, valid_key;
3286 int l4_key_offset = 0;
3287
3288 if (!alg)
3289 return -EINVAL;
3290
3291 #define FIELDS_PER_ALG 5
3292 #define MAX_KEY_OFF 40
3293 /* Clear all fields */
3294 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
3295
3296 /* Each of the 32 possible flow key algorithm definitions should
3297 * fall into the above incremental config (except ALG0). Otherwise a
3298 * single NPC MCAM entry is not sufficient for supporting RSS.
3299 *
3300 * If a different definition or combination is needed then the NPC MCAM
3301 * has to be programmed to filter such pkts and its action should
3302 * point to this definition to calculate flowtag or hash.
3303 *
3304 * The 'for loop' goes over _all_ protocol fields and the following
3305 * variables depict the state machine's forward progress logic.
3306 *
3307 * keyoff_marker - Enabled when hash byte length needs to be accounted
3308 * in field->key_offset update.
3309 * field_marker - Enabled when a new field needs to be selected.
3310 * group_member - Enabled when protocol is part of a group.
3311 */
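/* As a rough worked example: with flow_cfg =
 * NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_TCP, the loop ends up
 * with two enabled fields,
 *   SIP+DIP (8 bytes) at key_offset 0
 *   SPORT+DPORT (4 bytes) at key_offset 8
 * i.e. a classic 4-tuple hash packed into the 40 byte key.
 */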
3312
3313 keyoff_marker = 0; max_key_off = 0; group_member = 0;
3314 nr_field = 0; key_off = 0; field_marker = 1;
3315 field = &tmp; max_bit_pos = fls(flow_cfg);
3316 for (idx = 0;
3317 idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
3318 key_off < MAX_KEY_OFF; idx++) {
3319 key_type = BIT(idx);
3320 valid_key = flow_cfg & key_type;
3321 /* Found a field marker, reset the field values */
3322 if (field_marker)
3323 memset(&tmp, 0, sizeof(tmp));
3324
3325 field_marker = true;
3326 keyoff_marker = true;
3327 switch (key_type) {
3328 case NIX_FLOW_KEY_TYPE_PORT:
3329 field->sel_chan = true;
3330 /* This should be set to 1, when SEL_CHAN is set */
3331 field->bytesm1 = 1;
3332 break;
3333 case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
3334 field->lid = NPC_LID_LC;
3335 field->hdr_offset = 9; /* offset */
3336 field->bytesm1 = 0; /* 1 byte */
3337 field->ltype_match = NPC_LT_LC_IP;
3338 field->ltype_mask = 0xF;
3339 break;
3340 case NIX_FLOW_KEY_TYPE_IPV4:
3341 case NIX_FLOW_KEY_TYPE_INNR_IPV4:
3342 field->lid = NPC_LID_LC;
3343 field->ltype_match = NPC_LT_LC_IP;
3344 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
3345 field->lid = NPC_LID_LG;
3346 field->ltype_match = NPC_LT_LG_TU_IP;
3347 }
3348 field->hdr_offset = 12; /* SIP offset */
3349 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
3350 field->ltype_mask = 0xF; /* Match only IPv4 */
3351 keyoff_marker = false;
3352 break;
3353 case NIX_FLOW_KEY_TYPE_IPV6:
3354 case NIX_FLOW_KEY_TYPE_INNR_IPV6:
3355 field->lid = NPC_LID_LC;
3356 field->ltype_match = NPC_LT_LC_IP6;
3357 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
3358 field->lid = NPC_LID_LG;
3359 field->ltype_match = NPC_LT_LG_TU_IP6;
3360 }
3361 field->hdr_offset = 8; /* SIP offset */
3362 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
3363 field->ltype_mask = 0xF; /* Match only IPv6 */
3364 break;
3365 case NIX_FLOW_KEY_TYPE_TCP:
3366 case NIX_FLOW_KEY_TYPE_UDP:
3367 case NIX_FLOW_KEY_TYPE_SCTP:
3368 case NIX_FLOW_KEY_TYPE_INNR_TCP:
3369 case NIX_FLOW_KEY_TYPE_INNR_UDP:
3370 case NIX_FLOW_KEY_TYPE_INNR_SCTP:
3371 field->lid = NPC_LID_LD;
3372 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
3373 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
3374 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
3375 field->lid = NPC_LID_LH;
3376 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
3377
3378 /* Enum values for NPC_LID_LD and NPC_LID_LH are same,
3379 * so no need to change the ltype_match, just change
3380 * the lid for inner protocols
3381 */
3382 BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
3383 (int)NPC_LT_LH_TU_TCP);
3384 BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
3385 (int)NPC_LT_LH_TU_UDP);
3386 BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
3387 (int)NPC_LT_LH_TU_SCTP);
3388
3389 if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
3390 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
3391 valid_key) {
3392 field->ltype_match |= NPC_LT_LD_TCP;
3393 group_member = true;
3394 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
3395 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
3396 valid_key) {
3397 field->ltype_match |= NPC_LT_LD_UDP;
3398 group_member = true;
3399 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
3400 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
3401 valid_key) {
3402 field->ltype_match |= NPC_LT_LD_SCTP;
3403 group_member = true;
3404 }
3405 field->ltype_mask = ~field->ltype_match;
3406 if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
3407 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
3408 /* Handle the case where any of the group items
3409 * is enabled in the group but not the final one
3410 */
3411 if (group_member) {
3412 valid_key = true;
3413 group_member = false;
3414 }
3415 } else {
3416 field_marker = false;
3417 keyoff_marker = false;
3418 }
3419
3420 /* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
3421 * remember the TCP key offset within the 40 byte hash key.
3422 */
3423 if (key_type == NIX_FLOW_KEY_TYPE_TCP)
3424 l4_key_offset = key_off;
3425 break;
3426 case NIX_FLOW_KEY_TYPE_NVGRE:
3427 field->lid = NPC_LID_LD;
3428 field->hdr_offset = 4; /* VSID offset */
3429 field->bytesm1 = 2;
3430 field->ltype_match = NPC_LT_LD_NVGRE;
3431 field->ltype_mask = 0xF;
3432 break;
3433 case NIX_FLOW_KEY_TYPE_VXLAN:
3434 case NIX_FLOW_KEY_TYPE_GENEVE:
3435 field->lid = NPC_LID_LE;
3436 field->bytesm1 = 2;
3437 field->hdr_offset = 4;
3438 field->ltype_mask = 0xF;
3439 field_marker = false;
3440 keyoff_marker = false;
3441
3442 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
3443 field->ltype_match |= NPC_LT_LE_VXLAN;
3444 group_member = true;
3445 }
3446
3447 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
3448 field->ltype_match |= NPC_LT_LE_GENEVE;
3449 group_member = true;
3450 }
3451
3452 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
3453 if (group_member) {
3454 field->ltype_mask = ~field->ltype_match;
3455 field_marker = true;
3456 keyoff_marker = true;
3457 valid_key = true;
3458 group_member = false;
3459 }
3460 }
3461 break;
3462 case NIX_FLOW_KEY_TYPE_ETH_DMAC:
3463 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
3464 field->lid = NPC_LID_LA;
3465 field->ltype_match = NPC_LT_LA_ETHER;
3466 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
3467 field->lid = NPC_LID_LF;
3468 field->ltype_match = NPC_LT_LF_TU_ETHER;
3469 }
3470 field->hdr_offset = 0;
3471 field->bytesm1 = 5; /* DMAC 6 Byte */
3472 field->ltype_mask = 0xF;
3473 break;
3474 case NIX_FLOW_KEY_TYPE_IPV6_EXT:
3475 field->lid = NPC_LID_LC;
3476 field->hdr_offset = 40; /* IPV6 hdr */
3477 field->bytesm1 = 0; /* 1 Byte ext hdr */
3478 field->ltype_match = NPC_LT_LC_IP6_EXT;
3479 field->ltype_mask = 0xF;
3480 break;
3481 case NIX_FLOW_KEY_TYPE_GTPU:
3482 field->lid = NPC_LID_LE;
3483 field->hdr_offset = 4;
3484 field->bytesm1 = 3; /* 4 bytes TID */
3485 field->ltype_match = NPC_LT_LE_GTPU;
3486 field->ltype_mask = 0xF;
3487 break;
3488 case NIX_FLOW_KEY_TYPE_VLAN:
3489 field->lid = NPC_LID_LB;
3490 field->hdr_offset = 2; /* Skip TPID (2-bytes) */
3491 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
3492 field->ltype_match = NPC_LT_LB_CTAG;
3493 field->ltype_mask = 0xF;
3494 field->fn_mask = 1; /* Mask out the first nibble */
3495 break;
3496 case NIX_FLOW_KEY_TYPE_AH:
3497 case NIX_FLOW_KEY_TYPE_ESP:
3498 field->hdr_offset = 0;
3499 field->bytesm1 = 7; /* SPI + sequence number */
3500 field->ltype_mask = 0xF;
3501 field->lid = NPC_LID_LE;
3502 field->ltype_match = NPC_LT_LE_ESP;
3503 if (key_type == NIX_FLOW_KEY_TYPE_AH) {
3504 field->lid = NPC_LID_LD;
3505 field->ltype_match = NPC_LT_LD_AH;
3506 field->hdr_offset = 4;
3507 keyoff_marker = false;
3508 }
3509 break;
3510 }
3511 field->ena = 1;
3512
3513 /* Found a valid flow key type */
3514 if (valid_key) {
3515 /* Use the key offset of TCP/UDP/SCTP fields
3516 * for ESP/AH fields.
3517 */
3518 if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
3519 key_type == NIX_FLOW_KEY_TYPE_AH)
3520 key_off = l4_key_offset;
3521 field->key_offset = key_off;
3522 memcpy(&alg[nr_field], field, sizeof(*field));
3523 max_key_off = max(max_key_off, field->bytesm1 + 1);
3524
3525 /* Found a field marker, get the next field */
3526 if (field_marker)
3527 nr_field++;
3528 }
3529
3530 /* Found a keyoff marker, update the new key_off */
3531 if (keyoff_marker) {
3532 key_off += max_key_off;
3533 max_key_off = 0;
3534 }
3535 }
3536 /* Processed all the flow key types */
3537 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
3538 return 0;
3539 else
3540 return NIX_AF_ERR_RSS_NOSPC_FIELD;
3541 }
3542
3543 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
3544 {
3545 u64 field[FIELDS_PER_ALG];
3546 struct nix_hw *hw;
3547 int fid, rc;
3548
3549 hw = get_nix_hw(rvu->hw, blkaddr);
3550 if (!hw)
3551 return NIX_AF_ERR_INVALID_NIXBLK;
3552
3553 /* No room to add a new flow hash algorithm */
3554 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3555 return NIX_AF_ERR_RSS_NOSPC_ALGO;
3556
3557 /* Generate algo fields for the given flow_cfg */
3558 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3559 if (rc)
3560 return rc;
3561
3562 /* Update ALGX_FIELDX register with generated fields */
3563 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3564 rvu_write64(rvu, blkaddr,
3565 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3566 fid), field[fid]);
3567
3568 /* Store the flow_cfg for further lookup */
3569 rc = hw->flowkey.in_use;
3570 hw->flowkey.flowkey[rc] = flow_cfg;
3571 hw->flowkey.in_use++;
3572
3573 return rc;
3574 }
3575
3576 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3577 struct nix_rss_flowkey_cfg *req,
3578 struct nix_rss_flowkey_cfg_rsp *rsp)
3579 {
3580 u16 pcifunc = req->hdr.pcifunc;
3581 int alg_idx, nixlf, blkaddr;
3582 struct nix_hw *nix_hw;
3583 int err;
3584
3585 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3586 if (err)
3587 return err;
3588
3589 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3590 if (!nix_hw)
3591 return NIX_AF_ERR_INVALID_NIXBLK;
3592
3593 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
3594 	/* Failed to get an algo index from the existing list, reserve a new one */
3595 if (alg_idx < 0) {
3596 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3597 req->flowkey_cfg);
3598 if (alg_idx < 0)
3599 return alg_idx;
3600 }
3601 rsp->alg_idx = alg_idx;
3602 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3603 alg_idx, req->mcam_index);
3604 return 0;
3605 }
3606
3607 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3608 {
3609 u32 flowkey_cfg, minkey_cfg;
3610 int alg, fid, rc;
3611
3612 /* Disable all flow key algx fieldx */
3613 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3614 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3615 rvu_write64(rvu, blkaddr,
3616 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3617 0);
3618 }
3619
3620 /* IPv4/IPv6 SIP/DIPs */
3621 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3622 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3623 if (rc < 0)
3624 return rc;
3625
3626 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3627 minkey_cfg = flowkey_cfg;
3628 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3629 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3630 if (rc < 0)
3631 return rc;
3632
3633 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3634 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3635 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3636 if (rc < 0)
3637 return rc;
3638
3639 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3640 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3641 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3642 if (rc < 0)
3643 return rc;
3644
3645 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3646 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3647 NIX_FLOW_KEY_TYPE_UDP;
3648 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3649 if (rc < 0)
3650 return rc;
3651
3652 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3653 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3654 NIX_FLOW_KEY_TYPE_SCTP;
3655 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3656 if (rc < 0)
3657 return rc;
3658
3659 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3660 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3661 NIX_FLOW_KEY_TYPE_SCTP;
3662 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3663 if (rc < 0)
3664 return rc;
3665
3666 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3667 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3668 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3669 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3670 if (rc < 0)
3671 return rc;
3672
3673 return 0;
3674 }
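
/* For reference, the reservation order above yields the following
 * default algo indices, assuming the flowkey table starts out empty:
 *
 *	0: IPv4/IPv6 2-tuple		4: + TCP|UDP
 *	1: + TCP 4-tuple		5: + TCP|SCTP
 *	2: + UDP 4-tuple		6: + UDP|SCTP
 *	3: + SCTP 4-tuple		7: + TCP|UDP|SCTP
 */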
3675
3676 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3677 struct nix_set_mac_addr *req,
3678 struct msg_rsp *rsp)
3679 {
3680 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3681 u16 pcifunc = req->hdr.pcifunc;
3682 int blkaddr, nixlf, err;
3683 struct rvu_pfvf *pfvf;
3684
3685 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3686 if (err)
3687 return err;
3688
3689 pfvf = rvu_get_pfvf(rvu, pcifunc);
3690
3691 /* untrusted VF can't overwrite admin(PF) changes */
3692 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3693 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
3694 dev_warn(rvu->dev,
3695 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3696 return -EPERM;
3697 }
3698
3699 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3700
3701 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3702 pfvf->rx_chan_base, req->mac_addr);
3703
3704 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
3705 ether_addr_copy(pfvf->default_mac, req->mac_addr);
3706
3707 rvu_switch_update_rules(rvu, pcifunc);
3708
3709 return 0;
3710 }
3711
3712 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3713 struct msg_req *req,
3714 struct nix_get_mac_addr_rsp *rsp)
3715 {
3716 u16 pcifunc = req->hdr.pcifunc;
3717 struct rvu_pfvf *pfvf;
3718
3719 if (!is_nixlf_attached(rvu, pcifunc))
3720 return NIX_AF_ERR_AF_LF_INVALID;
3721
3722 pfvf = rvu_get_pfvf(rvu, pcifunc);
3723
3724 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
3725
3726 return 0;
3727 }
3728
3729 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
3730 struct msg_rsp *rsp)
3731 {
3732 bool allmulti, promisc, nix_rx_multicast;
3733 u16 pcifunc = req->hdr.pcifunc;
3734 struct rvu_pfvf *pfvf;
3735 int nixlf, err;
3736
3737 pfvf = rvu_get_pfvf(rvu, pcifunc);
3738 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
3739 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
3740 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
3741
3742 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
3743
3744 if (is_vf(pcifunc) && !nix_rx_multicast &&
3745 (promisc || allmulti)) {
3746 dev_warn_ratelimited(rvu->dev,
3747 "VF promisc/multicast not supported\n");
3748 return 0;
3749 }
3750
3751 /* untrusted VF can't configure promisc/allmulti */
3752 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3753 (promisc || allmulti))
3754 return 0;
3755
3756 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3757 if (err)
3758 return err;
3759
3760 if (nix_rx_multicast) {
3761 /* add/del this PF_FUNC to/from mcast pkt replication list */
3762 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
3763 allmulti);
3764 if (err) {
3765 dev_err(rvu->dev,
3766 "Failed to update pcifunc 0x%x to multicast list\n",
3767 pcifunc);
3768 return err;
3769 }
3770
3771 /* add/del this PF_FUNC to/from promisc pkt replication list */
3772 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
3773 promisc);
3774 if (err) {
3775 dev_err(rvu->dev,
3776 "Failed to update pcifunc 0x%x to promisc list\n",
3777 pcifunc);
3778 return err;
3779 }
3780 }
3781
3782 /* install/uninstall allmulti entry */
3783 if (allmulti) {
3784 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
3785 pfvf->rx_chan_base);
3786 } else {
3787 if (!nix_rx_multicast)
3788 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
3789 }
3790
3791 /* install/uninstall promisc entry */
3792 if (promisc) {
3793 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
3794 pfvf->rx_chan_base,
3795 pfvf->rx_chan_cnt);
3796
3797 if (rvu_npc_exact_has_match_table(rvu))
3798 rvu_npc_exact_promisc_enable(rvu, pcifunc);
3799 } else {
3800 if (!nix_rx_multicast)
3801 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
3802
3803 if (rvu_npc_exact_has_match_table(rvu))
3804 rvu_npc_exact_promisc_disable(rvu, pcifunc);
3805 }
3806
3807 return 0;
3808 }
3809
3810 static void nix_find_link_frs(struct rvu *rvu,
3811 struct nix_frs_cfg *req, u16 pcifunc)
3812 {
3813 int pf = rvu_get_pf(pcifunc);
3814 struct rvu_pfvf *pfvf;
3815 int maxlen, minlen;
3816 int numvfs, hwvf;
3817 int vf;
3818
3819 /* Update with requester's min/max lengths */
3820 pfvf = rvu_get_pfvf(rvu, pcifunc);
3821 pfvf->maxlen = req->maxlen;
3822 if (req->update_minlen)
3823 pfvf->minlen = req->minlen;
3824
3825 maxlen = req->maxlen;
3826 minlen = req->update_minlen ? req->minlen : 0;
3827
3828 /* Get this PF's numVFs and starting hwvf */
3829 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
3830
3831 /* For each VF, compare requested max/minlen */
3832 for (vf = 0; vf < numvfs; vf++) {
3833 pfvf = &rvu->hwvf[hwvf + vf];
3834 if (pfvf->maxlen > maxlen)
3835 maxlen = pfvf->maxlen;
3836 if (req->update_minlen &&
3837 pfvf->minlen && pfvf->minlen < minlen)
3838 minlen = pfvf->minlen;
3839 }
3840
3841 /* Compare requested max/minlen with PF's max/minlen */
3842 pfvf = &rvu->pf[pf];
3843 if (pfvf->maxlen > maxlen)
3844 maxlen = pfvf->maxlen;
3845 if (req->update_minlen &&
3846 pfvf->minlen && pfvf->minlen < minlen)
3847 minlen = pfvf->minlen;
3848
3849 	/* Update the request with the max/min of the PF and its VFs */
3850 req->maxlen = maxlen;
3851 if (req->update_minlen)
3852 req->minlen = minlen;
3853 }
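
/* Worked example (hypothetical numbers): if the PF has maxlen 1518 and
 * its two VFs have maxlen 9216 and 1518, a request with maxlen 1518 is
 * raised to 9216 so the shared RX link still accepts the largest frame
 * any of them expects. minlen moves the other way: the smallest non-zero
 * minlen among the PF and its VFs wins.
 */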
3854
3855 static int
3856 nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
3857 u16 pcifunc, u64 tx_credits)
3858 {
3859 struct rvu_hwinfo *hw = rvu->hw;
3860 int pf = rvu_get_pf(pcifunc);
3861 u8 cgx_id = 0, lmac_id = 0;
3862 unsigned long poll_tmo;
3863 	bool restore_tx_en = false;
3864 struct nix_hw *nix_hw;
3865 u64 cfg, sw_xoff = 0;
3866 u32 schq = 0;
3867 u32 credits;
3868 int rc;
3869
3870 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3871 if (!nix_hw)
3872 return NIX_AF_ERR_INVALID_NIXBLK;
3873
3874 if (tx_credits == nix_hw->tx_credits[link])
3875 return 0;
3876
3877 	/* Enable CGX Tx if it was disabled, so that in-flight credits can return */
3878 if (is_pf_cgxmapped(rvu, pf)) {
3879 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
3880 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
3881 lmac_id, true);
3882 }
3883
3884 mutex_lock(&rvu->rsrc_lock);
3885 /* Disable new traffic to link */
3886 if (hw->cap.nix_shaping) {
3887 schq = nix_get_tx_link(rvu, pcifunc);
3888 sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
3889 rvu_write64(rvu, blkaddr,
3890 NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
3891 }
3892
3893 rc = NIX_AF_ERR_LINK_CREDITS;
3894 poll_tmo = jiffies + usecs_to_jiffies(200000);
3895 /* Wait for credits to return */
3896 do {
3897 if (time_after(jiffies, poll_tmo))
3898 goto exit;
3899 usleep_range(100, 200);
3900
3901 cfg = rvu_read64(rvu, blkaddr,
3902 NIX_AF_TX_LINKX_NORM_CREDIT(link));
3903 credits = (cfg >> 12) & 0xFFFFFULL;
3904 } while (credits != nix_hw->tx_credits[link]);
3905
3906 cfg &= ~(0xFFFFFULL << 12);
3907 cfg |= (tx_credits << 12);
3908 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
3909 rc = 0;
3910
3911 nix_hw->tx_credits[link] = tx_credits;
3912
3913 exit:
3914 /* Enable traffic back */
3915 if (hw->cap.nix_shaping && !sw_xoff)
3916 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);
3917
3918 /* Restore state of cgx tx */
3919 if (restore_tx_en)
3920 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
3921
3922 mutex_unlock(&rvu->rsrc_lock);
3923 return rc;
3924 }
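
/* Credit math (illustrative): each credit covers 16 bytes of the LMAC
 * FIFO, so for, say, a 48KB FIFO and a 9216 byte max frame the caller
 * passes tx_credits = (49152 - 9216) / 16 = 2496, which this function
 * programs into bits <31:12> of NIX_AF_TX_LINKX_NORM_CREDIT.
 */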
3925
3926 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
3927 struct msg_rsp *rsp)
3928 {
3929 struct rvu_hwinfo *hw = rvu->hw;
3930 u16 pcifunc = req->hdr.pcifunc;
3931 int pf = rvu_get_pf(pcifunc);
3932 int blkaddr, schq, link = -1;
3933 struct nix_txsch *txsch;
3934 u64 cfg, lmac_fifo_len;
3935 struct nix_hw *nix_hw;
3936 struct rvu_pfvf *pfvf;
3937 u8 cgx = 0, lmac = 0;
3938 u16 max_mtu;
3939
3940 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3941 if (blkaddr < 0)
3942 return NIX_AF_ERR_AF_LF_INVALID;
3943
3944 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3945 if (!nix_hw)
3946 return NIX_AF_ERR_INVALID_NIXBLK;
3947
3948 if (is_afvf(pcifunc))
3949 rvu_get_lbk_link_max_frs(rvu, &max_mtu);
3950 else
3951 rvu_get_lmac_link_max_frs(rvu, &max_mtu);
3952
3953 if (!req->sdp_link && req->maxlen > max_mtu)
3954 return NIX_AF_ERR_FRS_INVALID;
3955
3956 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
3957 return NIX_AF_ERR_FRS_INVALID;
3958
3959 /* Check if requester wants to update SMQ's */
3960 if (!req->update_smq)
3961 goto rx_frscfg;
3962
3963 /* Update min/maxlen in each of the SMQ attached to this PF/VF */
3964 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
3965 mutex_lock(&rvu->rsrc_lock);
3966 for (schq = 0; schq < txsch->schq.max; schq++) {
3967 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
3968 continue;
3969 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
3970 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
3971 if (req->update_minlen)
3972 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
3973 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
3974 }
3975 mutex_unlock(&rvu->rsrc_lock);
3976
3977 rx_frscfg:
3978 /* Check if config is for SDP link */
3979 if (req->sdp_link) {
3980 if (!hw->sdp_links)
3981 return NIX_AF_ERR_RX_LINK_INVALID;
3982 link = hw->cgx_links + hw->lbk_links;
3983 goto linkcfg;
3984 }
3985
3986 /* Check if the request is from CGX mapped RVU PF */
3987 if (is_pf_cgxmapped(rvu, pf)) {
3988 /* Get CGX and LMAC to which this PF is mapped and find link */
3989 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
3990 link = (cgx * hw->lmac_per_cgx) + lmac;
3991 } else if (pf == 0) {
3992 /* For VFs of PF0 ingress is LBK port, so config LBK link */
3993 pfvf = rvu_get_pfvf(rvu, pcifunc);
3994 link = hw->cgx_links + pfvf->lbkid;
3995 }
3996
3997 if (link < 0)
3998 return NIX_AF_ERR_RX_LINK_INVALID;
3999
4000 nix_find_link_frs(rvu, req, pcifunc);
4001
4002 linkcfg:
4003 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
4004 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
4005 if (req->update_minlen)
4006 cfg = (cfg & ~0xFFFFULL) | req->minlen;
4007 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
4008
4009 if (req->sdp_link || pf == 0)
4010 return 0;
4011
4012 /* Update transmit credits for CGX links */
4013 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac);
4014 if (!lmac_fifo_len) {
4015 dev_err(rvu->dev,
4016 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4017 __func__, cgx, lmac);
4018 return 0;
4019 }
4020 return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
4021 (lmac_fifo_len - req->maxlen) / 16);
4022 }
4023
4024 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
4025 struct msg_rsp *rsp)
4026 {
4027 int nixlf, blkaddr, err;
4028 u64 cfg;
4029
4030 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
4031 if (err)
4032 return err;
4033
4034 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
4035 /* Set the interface configuration */
4036 if (req->len_verify & BIT(0))
4037 cfg |= BIT_ULL(41);
4038 else
4039 cfg &= ~BIT_ULL(41);
4040
4041 if (req->len_verify & BIT(1))
4042 cfg |= BIT_ULL(40);
4043 else
4044 cfg &= ~BIT_ULL(40);
4045
4046 if (req->csum_verify & BIT(0))
4047 cfg |= BIT_ULL(37);
4048 else
4049 cfg &= ~BIT_ULL(37);
4050
4051 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
4052
4053 return 0;
4054 }
4055
4056 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
4057 {
4058 /* CN10k supports 72KB FIFO size and max packet size of 64k */
4059 if (rvu->hw->lbk_bufsize == 0x12000)
4060 return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
4061
4062 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
4063 }
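
/* Illustrative arithmetic: with the CN10K 72KB (0x12000) LBK FIFO and,
 * say, lbk_max_frs of 9216 this returns (73728 - 9216) / 16 = 4032
 * credits; other silicon falls back to the fixed 1600.
 */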
4064
4065 static void nix_link_config(struct rvu *rvu, int blkaddr,
4066 struct nix_hw *nix_hw)
4067 {
4068 struct rvu_hwinfo *hw = rvu->hw;
4069 int cgx, lmac_cnt, slink, link;
4070 u16 lbk_max_frs, lmac_max_frs;
4071 unsigned long lmac_bmap;
4072 u64 tx_credits, cfg;
4073 u64 lmac_fifo_len;
4074 int iter;
4075
4076 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
4077 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
4078
4079 /* Set default min/max packet lengths allowed on NIX Rx links.
4080 *
4081 	 * With the HW reset minlen value of 60 bytes, HW would treat ARP
4082 	 * pkts as undersized and report them to SW as error pkts; hence
4083 	 * set minlen to 40 bytes.
4084 */
4085 for (link = 0; link < hw->cgx_links; link++) {
4086 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4087 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
4088 }
4089
4090 	for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
4091 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4092 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
4093 }
4094 if (hw->sdp_links) {
4095 link = hw->cgx_links + hw->lbk_links;
4096 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4097 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
4098 }
4099
4100 /* Set credits for Tx links assuming max packet length allowed.
4101 * This will be reconfigured based on MTU set for PF/VF.
4102 */
4103 for (cgx = 0; cgx < hw->cgx; cgx++) {
4104 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
4105 /* Skip when cgx is not available or lmac cnt is zero */
4106 if (lmac_cnt <= 0)
4107 continue;
4108 slink = cgx * hw->lmac_per_cgx;
4109
4110 /* Get LMAC id's from bitmap */
4111 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
4112 for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
4113 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
4114 if (!lmac_fifo_len) {
4115 dev_err(rvu->dev,
4116 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4117 __func__, cgx, iter);
4118 continue;
4119 }
4120 tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
4121 /* Enable credits and set credit pkt count to max allowed */
4122 cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4123
4124 link = iter + slink;
4125 nix_hw->tx_credits[link] = tx_credits;
4126 rvu_write64(rvu, blkaddr,
4127 NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
4128 }
4129 }
4130
4131 /* Set Tx credits for LBK link */
4132 slink = hw->cgx_links;
4133 for (link = slink; link < (slink + hw->lbk_links); link++) {
4134 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
4135 nix_hw->tx_credits[link] = tx_credits;
4136 /* Enable credits and set credit pkt count to max allowed */
4137 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4138 rvu_write64(rvu, blkaddr,
4139 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
4140 }
4141 }
4142
4143 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
4144 {
4145 int idx, err;
4146 u64 status;
4147
4148 /* Start X2P bus calibration */
4149 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4150 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
4151 /* Wait for calibration to complete */
4152 err = rvu_poll_reg(rvu, blkaddr,
4153 NIX_AF_STATUS, BIT_ULL(10), false);
4154 if (err) {
4155 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
4156 return err;
4157 }
4158
4159 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
4160 /* Check if CGX devices are ready */
4161 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
4162 /* Skip when cgx port is not available */
4163 if (!rvu_cgx_pdata(idx, rvu) ||
4164 (status & (BIT_ULL(16 + idx))))
4165 continue;
4166 dev_err(rvu->dev,
4167 "CGX%d didn't respond to NIX X2P calibration\n", idx);
4168 err = -EBUSY;
4169 }
4170
4171 /* Check if LBK is ready */
4172 if (!(status & BIT_ULL(19))) {
4173 dev_err(rvu->dev,
4174 "LBK didn't respond to NIX X2P calibration\n");
4175 err = -EBUSY;
4176 }
4177
4178 /* Clear 'calibrate_x2p' bit */
4179 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4180 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
4181 if (err || (status & 0x3FFULL))
4182 dev_err(rvu->dev,
4183 "NIX X2P calibration failed, status 0x%llx\n", status);
4184 if (err)
4185 return err;
4186 return 0;
4187 }
4188
4189 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
4190 {
4191 u64 cfg;
4192 int err;
4193
4194 /* Set admin queue endianness */
4195 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
4196 #ifdef __BIG_ENDIAN
4197 cfg |= BIT_ULL(8);
4198 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4199 #else
4200 cfg &= ~BIT_ULL(8);
4201 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4202 #endif
4203
4204 /* Do not bypass NDC cache */
4205 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
4206 cfg &= ~0x3FFEULL;
4207 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
4208 /* Disable caching of SQB aka SQEs */
4209 cfg |= 0x04ULL;
4210 #endif
4211 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
4212
4213 /* Result structure can be followed by RQ/SQ/CQ context at
4214 	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
4215 * operation type. Alloc sufficient result memory for all operations.
4216 */
4217 err = rvu_aq_alloc(rvu, &block->aq,
4218 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
4219 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
4220 if (err)
4221 return err;
4222
4223 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
4224 rvu_write64(rvu, block->addr,
4225 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
4226 return 0;
4227 }
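
/* Sizing sketch (illustrative): if, say, sizeof(struct nix_aq_res_s) is
 * 16 bytes, each result slot is ALIGN(16, 128) + 256 = 384 bytes: the
 * result header at RES, an RQ/SQ/CQ context at RES + 128 and a write
 * mask at RES + 256, matching the layout described above.
 */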
4228
4229 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
4230 {
4231 struct rvu_hwinfo *hw = rvu->hw;
4232 u64 hw_const;
4233
4234 hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
4235
4236 	/* On OcteonTx2 the DWRR quantum is configured directly into each of
4237 	 * the transmit scheduler queues, and PF/VF drivers are free to
4238 	 * configure any value up to 2^24.
4239 	 * On CN10K the HW is modified: the quantum configured at the
4240 	 * scheduler queues is a weight, and SW needs to set up a base DWRR
4241 	 * MTU at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW then computes
4242 	 * 'DWRR MTU * weight' to get the quantum.
4243 *
4244 * Check if HW uses a common MTU for all DWRR quantum configs.
4245 * On OcteonTx2 this register field is '0'.
4246 */
4247 if (((hw_const >> 56) & 0x10) == 0x10)
4248 hw->cap.nix_common_dwrr_mtu = true;
4249 }
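
/* Illustrative example (hypothetical values): on CN10K, if SW programs
 * NIX_AF_DWRR_RPM_MTU with 1500 and a PF/VF writes a weight of 8 into
 * its scheduler queue, HW schedules with a quantum of 1500 * 8 = 12000
 * bytes per DWRR round. On OcteonTx2 the queue register instead holds
 * the quantum itself.
 */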
4250
4251 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
4252 {
4253 const struct npc_lt_def_cfg *ltdefs;
4254 struct rvu_hwinfo *hw = rvu->hw;
4255 int blkaddr = nix_hw->blkaddr;
4256 struct rvu_block *block;
4257 int err;
4258 u64 cfg;
4259
4260 block = &hw->block[blkaddr];
4261
4262 if (is_rvu_96xx_B0(rvu)) {
4263 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
4264 * internal state when conditional clocks are turned off.
4265 * Hence enable them.
4266 */
4267 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4268 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
4269
4270 /* Set chan/link to backpressure TL3 instead of TL2 */
4271 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
4272
4273 /* Disable SQ manager's sticky mode operation (set TM6 = 0)
4274 * This sticky mode is known to cause SQ stalls when multiple
4275 * SQs are mapped to same SMQ and transmitting pkts at a time.
4276 */
4277 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
4278 cfg &= ~BIT_ULL(15);
4279 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
4280 }
4281
4282 ltdefs = rvu->kpu.lt_def;
4283 /* Calibrate X2P bus to check if CGX/LBK links are fine */
4284 err = nix_calibrate_x2p(rvu, blkaddr);
4285 if (err)
4286 return err;
4287
4288 /* Setup capabilities of the NIX block */
4289 rvu_nix_setup_capabilities(rvu, blkaddr);
4290
4291 /* Initialize admin queue */
4292 err = nix_aq_init(rvu, block);
4293 if (err)
4294 return err;
4295
4296 /* Restore CINT timer delay to HW reset values */
4297 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
4298
4299 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
4300
4301 	/* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
4302 cfg |= 1ULL;
4303 if (!is_rvu_otx2(rvu))
4304 cfg |= NIX_PTP_1STEP_EN;
4305
4306 rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
4307
4308 if (is_block_implemented(hw, blkaddr)) {
4309 err = nix_setup_txschq(rvu, nix_hw, blkaddr);
4310 if (err)
4311 return err;
4312
4313 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
4314 if (err)
4315 return err;
4316
4317 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
4318 if (err)
4319 return err;
4320
4321 err = nix_setup_mcast(rvu, nix_hw, blkaddr);
4322 if (err)
4323 return err;
4324
4325 err = nix_setup_txvlan(rvu, nix_hw);
4326 if (err)
4327 return err;
4328
4329 /* Configure segmentation offload formats */
4330 nix_setup_lso(rvu, nix_hw, blkaddr);
4331
4332 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
4333 * This helps HW protocol checker to identify headers
4334 * and validate length and checksums.
4335 */
4336 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
4337 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
4338 ltdefs->rx_ol2.ltype_mask);
4339 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
4340 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
4341 ltdefs->rx_oip4.ltype_mask);
4342 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
4343 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
4344 ltdefs->rx_iip4.ltype_mask);
4345 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
4346 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
4347 ltdefs->rx_oip6.ltype_mask);
4348 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
4349 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
4350 ltdefs->rx_iip6.ltype_mask);
4351 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
4352 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
4353 ltdefs->rx_otcp.ltype_mask);
4354 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
4355 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
4356 ltdefs->rx_itcp.ltype_mask);
4357 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
4358 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
4359 ltdefs->rx_oudp.ltype_mask);
4360 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
4361 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
4362 ltdefs->rx_iudp.ltype_mask);
4363 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
4364 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
4365 ltdefs->rx_osctp.ltype_mask);
4366 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
4367 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
4368 ltdefs->rx_isctp.ltype_mask);
4369
4370 if (!is_rvu_otx2(rvu)) {
4371 /* Enable APAD calculation for other protocols
4372 * matching APAD0 and APAD1 lt def registers.
4373 */
4374 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
4375 (ltdefs->rx_apad0.valid << 11) |
4376 (ltdefs->rx_apad0.lid << 8) |
4377 (ltdefs->rx_apad0.ltype_match << 4) |
4378 ltdefs->rx_apad0.ltype_mask);
4379 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
4380 (ltdefs->rx_apad1.valid << 11) |
4381 (ltdefs->rx_apad1.lid << 8) |
4382 (ltdefs->rx_apad1.ltype_match << 4) |
4383 ltdefs->rx_apad1.ltype_mask);
4384
4385 			/* The receive Ethertype definition register defines layer
4386 			 * information in NPC_RESULT_S to identify the Ethertype
4387 			 * location in the L2 header. Used for Ethertype overwriting
4388 			 * in the inline IPsec flow.
4389 */
4390 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
4391 (ltdefs->rx_et[0].offset << 12) |
4392 (ltdefs->rx_et[0].valid << 11) |
4393 (ltdefs->rx_et[0].lid << 8) |
4394 (ltdefs->rx_et[0].ltype_match << 4) |
4395 ltdefs->rx_et[0].ltype_mask);
4396 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
4397 (ltdefs->rx_et[1].offset << 12) |
4398 (ltdefs->rx_et[1].valid << 11) |
4399 (ltdefs->rx_et[1].lid << 8) |
4400 (ltdefs->rx_et[1].ltype_match << 4) |
4401 ltdefs->rx_et[1].ltype_mask);
4402 }
4403
4404 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
4405 if (err)
4406 return err;
4407
4408 nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
4409 sizeof(u64), GFP_KERNEL);
4410 if (!nix_hw->tx_credits)
4411 return -ENOMEM;
4412
4413 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
4414 nix_link_config(rvu, blkaddr, nix_hw);
4415
4416 /* Enable Channel backpressure */
4417 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
4418 }
4419 return 0;
4420 }
4421
4422 int rvu_nix_init(struct rvu *rvu)
4423 {
4424 struct rvu_hwinfo *hw = rvu->hw;
4425 struct nix_hw *nix_hw;
4426 int blkaddr = 0, err;
4427 int i = 0;
4428
4429 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
4430 GFP_KERNEL);
4431 if (!hw->nix)
4432 return -ENOMEM;
4433
4434 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4435 while (blkaddr) {
4436 nix_hw = &hw->nix[i];
4437 nix_hw->rvu = rvu;
4438 nix_hw->blkaddr = blkaddr;
4439 err = rvu_nix_block_init(rvu, nix_hw);
4440 if (err)
4441 return err;
4442 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4443 i++;
4444 }
4445
4446 return 0;
4447 }
4448
4449 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
4450 struct rvu_block *block)
4451 {
4452 struct nix_txsch *txsch;
4453 struct nix_mcast *mcast;
4454 struct nix_txvlan *vlan;
4455 struct nix_hw *nix_hw;
4456 int lvl;
4457
4458 rvu_aq_free(rvu, block->aq);
4459
4460 if (is_block_implemented(rvu->hw, blkaddr)) {
4461 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4462 if (!nix_hw)
4463 return;
4464
4465 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
4466 txsch = &nix_hw->txsch[lvl];
4467 kfree(txsch->schq.bmap);
4468 }
4469
4470 kfree(nix_hw->tx_credits);
4471
4472 nix_ipolicer_freemem(rvu, nix_hw);
4473
4474 vlan = &nix_hw->txvlan;
4475 kfree(vlan->rsrc.bmap);
4476 mutex_destroy(&vlan->rsrc_lock);
4477
4478 mcast = &nix_hw->mcast;
4479 qmem_free(rvu->dev, mcast->mce_ctx);
4480 qmem_free(rvu->dev, mcast->mcast_buf);
4481 mutex_destroy(&mcast->mce_lock);
4482 }
4483 }
4484
4485 void rvu_nix_freemem(struct rvu *rvu)
4486 {
4487 struct rvu_hwinfo *hw = rvu->hw;
4488 struct rvu_block *block;
4489 int blkaddr = 0;
4490
4491 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4492 while (blkaddr) {
4493 block = &hw->block[blkaddr];
4494 rvu_nix_block_freemem(rvu, blkaddr, block);
4495 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4496 }
4497 }
4498
4499 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
4500 struct msg_rsp *rsp)
4501 {
4502 u16 pcifunc = req->hdr.pcifunc;
4503 struct rvu_pfvf *pfvf;
4504 int nixlf, err;
4505
4506 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4507 if (err)
4508 return err;
4509
4510 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
4511
4512 npc_mcam_enable_flows(rvu, pcifunc);
4513
4514 pfvf = rvu_get_pfvf(rvu, pcifunc);
4515 set_bit(NIXLF_INITIALIZED, &pfvf->flags);
4516
4517 rvu_switch_update_rules(rvu, pcifunc);
4518
4519 return rvu_cgx_start_stop_io(rvu, pcifunc, true);
4520 }
4521
4522 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
4523 struct msg_rsp *rsp)
4524 {
4525 u16 pcifunc = req->hdr.pcifunc;
4526 struct rvu_pfvf *pfvf;
4527 int nixlf, err;
4528
4529 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4530 if (err)
4531 return err;
4532
4533 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
4534
4535 pfvf = rvu_get_pfvf(rvu, pcifunc);
4536 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
4537
4538 return rvu_cgx_start_stop_io(rvu, pcifunc, false);
4539 }
4540
4541 #define RX_SA_BASE GENMASK_ULL(52, 7)
4542
4543 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
4544 {
4545 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
4546 struct hwctx_disable_req ctx_req;
4547 int pf = rvu_get_pf(pcifunc);
4548 struct mac_ops *mac_ops;
4549 u8 cgx_id, lmac_id;
4550 u64 sa_base;
4551 void *cgxd;
4552 int err;
4553
4554 ctx_req.hdr.pcifunc = pcifunc;
4555
4556 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
4557 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
4558 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
4559 nix_interface_deinit(rvu, pcifunc, nixlf);
4560 nix_rx_sync(rvu, blkaddr);
4561 nix_txschq_free(rvu, pcifunc);
4562
4563 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
4564
4565 rvu_cgx_start_stop_io(rvu, pcifunc, false);
4566
4567 if (pfvf->sq_ctx) {
4568 ctx_req.ctype = NIX_AQ_CTYPE_SQ;
4569 err = nix_lf_hwctx_disable(rvu, &ctx_req);
4570 if (err)
4571 dev_err(rvu->dev, "SQ ctx disable failed\n");
4572 }
4573
4574 if (pfvf->rq_ctx) {
4575 ctx_req.ctype = NIX_AQ_CTYPE_RQ;
4576 err = nix_lf_hwctx_disable(rvu, &ctx_req);
4577 if (err)
4578 dev_err(rvu->dev, "RQ ctx disable failed\n");
4579 }
4580
4581 if (pfvf->cq_ctx) {
4582 ctx_req.ctype = NIX_AQ_CTYPE_CQ;
4583 err = nix_lf_hwctx_disable(rvu, &ctx_req);
4584 if (err)
4585 dev_err(rvu->dev, "CQ ctx disable failed\n");
4586 }
4587
4588 /* reset HW config done for Switch headers */
4589 rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
4590 (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
4591
4592 /* Disabling CGX and NPC config done for PTP */
4593 if (pfvf->hw_rx_tstamp_en) {
4594 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
4595 cgxd = rvu_cgx_pdata(cgx_id, rvu);
4596 mac_ops = get_mac_ops(cgxd);
4597 mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
4598 /* Undo NPC config done for PTP */
4599 if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
4600 dev_err(rvu->dev, "NPC config for PTP failed\n");
4601 pfvf->hw_rx_tstamp_en = false;
4602 }
4603
4604 /* reset priority flow control config */
4605 rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
4606
4607 /* reset 802.3x flow control config */
4608 rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
4609
4610 nix_ctx_free(rvu, pfvf);
4611
4612 nix_free_all_bandprof(rvu, pcifunc);
4613
4614 sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
4615 if (FIELD_GET(RX_SA_BASE, sa_base)) {
4616 err = rvu_cpt_ctx_flush(rvu, pcifunc);
4617 if (err)
4618 dev_err(rvu->dev,
4619 "CPT ctx flush failed with error: %d\n", err);
4620 }
4621 }
4622
4623 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
4624
4625 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
4626 {
4627 struct rvu_hwinfo *hw = rvu->hw;
4628 struct rvu_block *block;
4629 int blkaddr, pf;
4630 int nixlf;
4631 u64 cfg;
4632
4633 pf = rvu_get_pf(pcifunc);
4634 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
4635 return 0;
4636
4637 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4638 if (blkaddr < 0)
4639 return NIX_AF_ERR_AF_LF_INVALID;
4640
4641 block = &hw->block[blkaddr];
4642 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
4643 if (nixlf < 0)
4644 return NIX_AF_ERR_AF_LF_INVALID;
4645
4646 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
4647
4648 if (enable)
4649 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
4650 else
4651 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
4652
4653 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
4654
4655 return 0;
4656 }
4657
4658 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
4659 struct msg_rsp *rsp)
4660 {
4661 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
4662 }
4663
4664 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
4665 struct msg_rsp *rsp)
4666 {
4667 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
4668 }
4669
4670 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
4671 struct nix_lso_format_cfg *req,
4672 struct nix_lso_format_cfg_rsp *rsp)
4673 {
4674 u16 pcifunc = req->hdr.pcifunc;
4675 struct nix_hw *nix_hw;
4676 struct rvu_pfvf *pfvf;
4677 int blkaddr, idx, f;
4678 u64 reg;
4679
4680 pfvf = rvu_get_pfvf(rvu, pcifunc);
4681 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4682 if (!pfvf->nixlf || blkaddr < 0)
4683 return NIX_AF_ERR_AF_LF_INVALID;
4684
4685 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4686 if (!nix_hw)
4687 return NIX_AF_ERR_INVALID_NIXBLK;
4688
4689 /* Find existing matching LSO format, if any */
4690 for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
4691 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
4692 reg = rvu_read64(rvu, blkaddr,
4693 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
4694 if (req->fields[f] != (reg & req->field_mask))
4695 break;
4696 }
4697
4698 if (f == NIX_LSO_FIELD_MAX)
4699 break;
4700 }
4701
4702 if (idx < nix_hw->lso.in_use) {
4703 /* Match found */
4704 rsp->lso_format_idx = idx;
4705 return 0;
4706 }
4707
4708 if (nix_hw->lso.in_use == nix_hw->lso.total)
4709 return NIX_AF_ERR_LSO_CFG_FAIL;
4710
4711 rsp->lso_format_idx = nix_hw->lso.in_use++;
4712
4713 for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
4714 rvu_write64(rvu, blkaddr,
4715 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
4716 req->fields[f]);
4717
4718 return 0;
4719 }
4720
4721 #define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48)
4722 #define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32)
4723 #define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16)
4724 #define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0)
4725
4726 #define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24)
4727 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
4728 #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
4729
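/* Field packing sketch: FIELD_PREP() shifts a value into its GENMASK
 * slot, e.g. with hypothetical egrp = 1 and opcode = 0x20:
 *
 *	val = FIELD_PREP(IPSEC_GEN_CFG_EGRP, 1) |	// bit 48
 *	      FIELD_PREP(IPSEC_GEN_CFG_OPCODE, 0x20);	// bits <37:32>
 *
 * yields val = (1ULL << 48) | (0x20ULL << 32), as used below.
 */
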
4730 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
4731 int blkaddr)
4732 {
4733 u8 cpt_idx, cpt_blkaddr;
4734 u64 val;
4735
4736 cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
4737 if (req->enable) {
4738 val = 0;
4739 /* Enable context prefetching */
4740 if (!is_rvu_otx2(rvu))
4741 val |= BIT_ULL(51);
4742
4743 /* Set OPCODE and EGRP */
4744 val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
4745 val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
4746 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
4747 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
4748
4749 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
4750
4751 /* Set CPT queue for inline IPSec */
4752 val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
4753 val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
4754 req->inst_qsel.cpt_pf_func);
4755
4756 if (!is_rvu_otx2(rvu)) {
4757 cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
4758 BLKADDR_CPT1;
4759 val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
4760 }
4761
4762 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
4763 val);
4764
4765 /* Set CPT credit */
4766 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
4767 req->cpt_credit);
4768 } else {
4769 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
4770 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
4771 0x0);
4772 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
4773 0x3FFFFF);
4774 }
4775 }
4776
4777 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
4778 struct nix_inline_ipsec_cfg *req,
4779 struct msg_rsp *rsp)
4780 {
4781 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
4782 return 0;
4783
4784 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
4785 if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
4786 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
4787
4788 return 0;
4789 }
4790
4791 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
4792 struct nix_inline_ipsec_lf_cfg *req,
4793 struct msg_rsp *rsp)
4794 {
4795 int lf, blkaddr, err;
4796 u64 val;
4797
4798 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
4799 return 0;
4800
4801 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
4802 if (err)
4803 return err;
4804
4805 if (req->enable) {
4806 /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
4807 val = (u64)req->ipsec_cfg0.tt << 44 |
4808 (u64)req->ipsec_cfg0.tag_const << 20 |
4809 (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
4810 req->ipsec_cfg0.lenm1_max;
4811
4812 if (blkaddr == BLKADDR_NIX1)
4813 val |= BIT_ULL(46);
4814
4815 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
4816
4817 /* Set SA_IDX_W and SA_IDX_MAX */
4818 val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
4819 req->ipsec_cfg1.sa_idx_max;
4820 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
4821
4822 /* Set SA base address */
4823 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
4824 req->sa_base_addr);
4825 } else {
4826 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
4827 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
4828 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
4829 0x0);
4830 }
4831
4832 return 0;
4833 }
4834 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
4835 {
4836 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
4837
4838 /* overwrite vf mac address with default_mac */
4839 if (from_vf)
4840 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
4841 }
4842
4843 /* NIX ingress policers or bandwidth profiles APIs */
4844 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
4845 {
4846 struct npc_lt_def_cfg defs, *ltdefs;
4847
4848 ltdefs = &defs;
4849 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
4850
4851 /* Extract PCP and DEI fields from outer VLAN from byte offset
4852 	 * 2 from the start of LB_PTR (i.e. TAG).
4853 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
4854 * fields are considered when 'Tunnel enable' is set in profile.
4855 */
4856 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
4857 (2UL << 12) | (ltdefs->ovlan.lid << 8) |
4858 (ltdefs->ovlan.ltype_match << 4) |
4859 ltdefs->ovlan.ltype_mask);
4860 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
4861 (2UL << 12) | (ltdefs->ivlan.lid << 8) |
4862 (ltdefs->ivlan.ltype_match << 4) |
4863 ltdefs->ivlan.ltype_mask);
4864
4865 /* DSCP field in outer and tunneled IPv4 packets */
4866 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
4867 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
4868 (ltdefs->rx_oip4.ltype_match << 4) |
4869 ltdefs->rx_oip4.ltype_mask);
4870 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
4871 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
4872 (ltdefs->rx_iip4.ltype_match << 4) |
4873 ltdefs->rx_iip4.ltype_mask);
4874
4875 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
4876 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
4877 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
4878 (ltdefs->rx_oip6.ltype_match << 4) |
4879 ltdefs->rx_oip6.ltype_mask);
4880 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
4881 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
4882 (ltdefs->rx_iip6.ltype_match << 4) |
4883 ltdefs->rx_iip6.ltype_mask);
4884 }
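
/* Register encoding sketch (hypothetical field values): the VLAN/IPv4
 * registers above pack 'byte offset << 12 | lid << 8 | ltype_match << 4
 * | ltype_mask' (the IPv6 ones shift the offset by 11). E.g. an outer
 * VLAN at LID 2 with ltype 4, mask 0xF and PCP/DEI two bytes into the
 * tag encodes as (2 << 12) | (2 << 8) | (4 << 4) | 0xF.
 */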
4885
4886 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
4887 int layer, int prof_idx)
4888 {
4889 struct nix_cn10k_aq_enq_req aq_req;
4890 int rc;
4891
4892 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4893
4894 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
4895 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
4896 aq_req.op = NIX_AQ_INSTOP_INIT;
4897
4898 /* Context is all zeros, submit to AQ */
4899 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4900 (struct nix_aq_enq_req *)&aq_req, NULL);
4901 if (rc)
4902 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
4903 layer, prof_idx);
4904 return rc;
4905 }
4906
4907 static int nix_setup_ipolicers(struct rvu *rvu,
4908 struct nix_hw *nix_hw, int blkaddr)
4909 {
4910 struct rvu_hwinfo *hw = rvu->hw;
4911 struct nix_ipolicer *ipolicer;
4912 int err, layer, prof_idx;
4913 u64 cfg;
4914
4915 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
4916 if (!(cfg & BIT_ULL(61))) {
4917 hw->cap.ipolicer = false;
4918 return 0;
4919 }
4920
4921 hw->cap.ipolicer = true;
4922 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
4923 sizeof(*ipolicer), GFP_KERNEL);
4924 if (!nix_hw->ipolicer)
4925 return -ENOMEM;
4926
4927 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
4928
4929 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4930 ipolicer = &nix_hw->ipolicer[layer];
4931 switch (layer) {
4932 case BAND_PROF_LEAF_LAYER:
4933 ipolicer->band_prof.max = cfg & 0XFFFF;
4934 break;
4935 case BAND_PROF_MID_LAYER:
4936 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
4937 break;
4938 case BAND_PROF_TOP_LAYER:
4939 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
4940 break;
4941 }
4942
4943 if (!ipolicer->band_prof.max)
4944 continue;
4945
4946 err = rvu_alloc_bitmap(&ipolicer->band_prof);
4947 if (err)
4948 return err;
4949
4950 ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
4951 ipolicer->band_prof.max,
4952 sizeof(u16), GFP_KERNEL);
4953 if (!ipolicer->pfvf_map)
4954 return -ENOMEM;
4955
4956 ipolicer->match_id = devm_kcalloc(rvu->dev,
4957 ipolicer->band_prof.max,
4958 sizeof(u16), GFP_KERNEL);
4959 if (!ipolicer->match_id)
4960 return -ENOMEM;
4961
4962 for (prof_idx = 0;
4963 prof_idx < ipolicer->band_prof.max; prof_idx++) {
4964 /* Set AF as current owner for INIT ops to succeed */
4965 ipolicer->pfvf_map[prof_idx] = 0x00;
4966
4967 /* There is no enable bit in the profile context,
4968 			 * hence no context disable either. INIT all of them
4969 			 * here so that a PF/VF later only has to do a WRITE
4970 			 * to set up policer rates and config.
4971 */
4972 err = nix_init_policer_context(rvu, nix_hw,
4973 layer, prof_idx);
4974 if (err)
4975 return err;
4976 }
4977
4978 /* Allocate memory for maintaining ref_counts for MID level
4979 * profiles, this will be needed for leaf layer profiles'
4980 * aggregation.
4981 */
4982 if (layer != BAND_PROF_MID_LAYER)
4983 continue;
4984
4985 ipolicer->ref_count = devm_kcalloc(rvu->dev,
4986 ipolicer->band_prof.max,
4987 sizeof(u16), GFP_KERNEL);
4988 if (!ipolicer->ref_count)
4989 return -ENOMEM;
4990 }
4991
4992 	/* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
4993 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
4994
4995 nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
4996
4997 return 0;
4998 }
4999
5000 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
5001 {
5002 struct nix_ipolicer *ipolicer;
5003 int layer;
5004
5005 if (!rvu->hw->cap.ipolicer)
5006 return;
5007
5008 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5009 ipolicer = &nix_hw->ipolicer[layer];
5010
5011 if (!ipolicer->band_prof.max)
5012 continue;
5013
5014 kfree(ipolicer->band_prof.bmap);
5015 }
5016 }
5017
5018 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
5019 struct nix_hw *nix_hw, u16 pcifunc)
5020 {
5021 struct nix_ipolicer *ipolicer;
5022 int layer, hi_layer, prof_idx;
5023
5024 /* Bits [15:14] in profile index represent layer */
5025 layer = (req->qidx >> 14) & 0x03;
5026 prof_idx = req->qidx & 0x3FFF;
5027
5028 ipolicer = &nix_hw->ipolicer[layer];
5029 if (prof_idx >= ipolicer->band_prof.max)
5030 return -EINVAL;
5031
5032 /* Check if the profile is allocated to the requesting PCIFUNC or not
5033 * with the exception of AF. AF is allowed to read and update contexts.
5034 */
5035 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
5036 return -EINVAL;
5037
5038 /* If this profile is linked to higher layer profile then check
5039 * if that profile is also allocated to the requesting PCIFUNC
5040 * or not.
5041 */
5042 if (!req->prof.hl_en)
5043 return 0;
5044
5045 /* Leaf layer profile can link only to mid layer and
5046 * mid layer to top layer.
5047 */
5048 if (layer == BAND_PROF_LEAF_LAYER)
5049 hi_layer = BAND_PROF_MID_LAYER;
5050 else if (layer == BAND_PROF_MID_LAYER)
5051 hi_layer = BAND_PROF_TOP_LAYER;
5052 else
5053 return -EINVAL;
5054
5055 ipolicer = &nix_hw->ipolicer[hi_layer];
5056 prof_idx = req->prof.band_prof_id;
5057 if (prof_idx >= ipolicer->band_prof.max ||
5058 ipolicer->pfvf_map[prof_idx] != pcifunc)
5059 return -EINVAL;
5060
5061 return 0;
5062 }
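
/* qidx encoding sketch: the layer lives in bits [15:14] and the profile
 * index in bits [13:0], so e.g. mid layer profile 5 is addressed as
 * qidx = (BAND_PROF_MID_LAYER << 14) | 5; this function recovers
 * layer = (qidx >> 14) & 0x03 and prof_idx = qidx & 0x3FFF.
 */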
5063
5064 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
5065 struct nix_bandprof_alloc_req *req,
5066 struct nix_bandprof_alloc_rsp *rsp)
5067 {
5068 int blkaddr, layer, prof, idx, err;
5069 u16 pcifunc = req->hdr.pcifunc;
5070 struct nix_ipolicer *ipolicer;
5071 struct nix_hw *nix_hw;
5072
5073 if (!rvu->hw->cap.ipolicer)
5074 return NIX_AF_ERR_IPOLICER_NOTSUPP;
5075
5076 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5077 if (err)
5078 return err;
5079
5080 mutex_lock(&rvu->rsrc_lock);
5081 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5082 if (layer == BAND_PROF_INVAL_LAYER)
5083 continue;
5084 if (!req->prof_count[layer])
5085 continue;
5086
5087 ipolicer = &nix_hw->ipolicer[layer];
5088 for (idx = 0; idx < req->prof_count[layer]; idx++) {
5089 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
5090 if (idx == MAX_BANDPROF_PER_PFFUNC)
5091 break;
5092
5093 prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5094 if (prof < 0)
5095 break;
5096 rsp->prof_count[layer]++;
5097 rsp->prof_idx[layer][idx] = prof;
5098 ipolicer->pfvf_map[prof] = pcifunc;
5099 }
5100 }
5101 mutex_unlock(&rvu->rsrc_lock);
5102 return 0;
5103 }
5104
5105 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
5106 {
5107 int blkaddr, layer, prof_idx, err;
5108 struct nix_ipolicer *ipolicer;
5109 struct nix_hw *nix_hw;
5110
5111 if (!rvu->hw->cap.ipolicer)
5112 return NIX_AF_ERR_IPOLICER_NOTSUPP;
5113
5114 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5115 if (err)
5116 return err;
5117
5118 mutex_lock(&rvu->rsrc_lock);
5119 /* Free all the profiles allocated to the PCIFUNC */
5120 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5121 if (layer == BAND_PROF_INVAL_LAYER)
5122 continue;
5123 ipolicer = &nix_hw->ipolicer[layer];
5124
5125 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
5126 if (ipolicer->pfvf_map[prof_idx] != pcifunc)
5127 continue;
5128
5129 /* Clear ratelimit aggregation, if any */
5130 if (layer == BAND_PROF_LEAF_LAYER &&
5131 ipolicer->match_id[prof_idx])
5132 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5133
5134 ipolicer->pfvf_map[prof_idx] = 0x00;
5135 ipolicer->match_id[prof_idx] = 0;
5136 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5137 }
5138 }
5139 mutex_unlock(&rvu->rsrc_lock);
5140 return 0;
5141 }
5142
5143 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
5144 struct nix_bandprof_free_req *req,
5145 struct msg_rsp *rsp)
5146 {
5147 int blkaddr, layer, prof_idx, idx, err;
5148 u16 pcifunc = req->hdr.pcifunc;
5149 struct nix_ipolicer *ipolicer;
5150 struct nix_hw *nix_hw;
5151
5152 if (req->free_all)
5153 return nix_free_all_bandprof(rvu, pcifunc);
5154
5155 if (!rvu->hw->cap.ipolicer)
5156 return NIX_AF_ERR_IPOLICER_NOTSUPP;
5157
5158 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5159 if (err)
5160 return err;
5161
5162 mutex_lock(&rvu->rsrc_lock);
5163 /* Free the requested profile indices */
5164 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5165 if (layer == BAND_PROF_INVAL_LAYER)
5166 continue;
5167 if (!req->prof_count[layer])
5168 continue;
5169
5170 ipolicer = &nix_hw->ipolicer[layer];
5171 for (idx = 0; idx < req->prof_count[layer]; idx++) {
5172 prof_idx = req->prof_idx[layer][idx];
5173 if (prof_idx >= ipolicer->band_prof.max ||
5174 ipolicer->pfvf_map[prof_idx] != pcifunc)
5175 continue;
5176
5177 /* Clear ratelimit aggregation, if any */
5178 if (layer == BAND_PROF_LEAF_LAYER &&
5179 ipolicer->match_id[prof_idx])
5180 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5181
5182 ipolicer->pfvf_map[prof_idx] = 0x00;
5183 ipolicer->match_id[prof_idx] = 0;
5184 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5185 if (idx == MAX_BANDPROF_PER_PFFUNC)
5186 break;
5187 }
5188 }
5189 mutex_unlock(&rvu->rsrc_lock);
5190 return 0;
5191 }
5192
5193 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
5194 struct nix_cn10k_aq_enq_req *aq_req,
5195 struct nix_cn10k_aq_enq_rsp *aq_rsp,
5196 u16 pcifunc, u8 ctype, u32 qidx)
5197 {
5198 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5199 aq_req->hdr.pcifunc = pcifunc;
5200 aq_req->ctype = ctype;
5201 aq_req->op = NIX_AQ_INSTOP_READ;
5202 aq_req->qidx = qidx;
5203
5204 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5205 (struct nix_aq_enq_req *)aq_req,
5206 (struct nix_aq_enq_rsp *)aq_rsp);
5207 }
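
/* Usage sketch: callers read back a context with e.g.
 *
 *	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
 *				 NIX_AQ_CTYPE_RQ, rq_idx);
 *
 * and then inspect aq_rsp.rq (or aq_rsp.prof for NIX_AQ_CTYPE_BANDPROF),
 * as rvu_nix_setup_ratelimit_aggr() below does.
 */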
5208
5209 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
5210 struct nix_hw *nix_hw,
5211 struct nix_cn10k_aq_enq_req *aq_req,
5212 struct nix_cn10k_aq_enq_rsp *aq_rsp,
5213 u32 leaf_prof, u16 mid_prof)
5214 {
5215 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5216 aq_req->hdr.pcifunc = 0x00;
5217 aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
5218 aq_req->op = NIX_AQ_INSTOP_WRITE;
5219 aq_req->qidx = leaf_prof;
5220
5221 aq_req->prof.band_prof_id = mid_prof;
5222 aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
5223 aq_req->prof.hl_en = 1;
5224 aq_req->prof_mask.hl_en = 1;
5225
5226 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5227 (struct nix_aq_enq_req *)aq_req,
5228 (struct nix_aq_enq_rsp *)aq_rsp);
5229 }
5230
5231 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
5232 u16 rq_idx, u16 match_id)
5233 {
5234 int leaf_prof, mid_prof, leaf_match;
5235 struct nix_cn10k_aq_enq_req aq_req;
5236 struct nix_cn10k_aq_enq_rsp aq_rsp;
5237 struct nix_ipolicer *ipolicer;
5238 struct nix_hw *nix_hw;
5239 int blkaddr, idx, rc;
5240
5241 if (!rvu->hw->cap.ipolicer)
5242 return 0;
5243
5244 rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5245 if (rc)
5246 return rc;
5247
5248 /* Fetch the RQ's context to see if policing is enabled */
5249 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
5250 NIX_AQ_CTYPE_RQ, rq_idx);
5251 if (rc) {
5252 dev_err(rvu->dev,
5253 "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
5254 __func__, rq_idx, pcifunc);
5255 return rc;
5256 }
5257
5258 if (!aq_rsp.rq.policer_ena)
5259 return 0;
5260
5261 /* Get the bandwidth profile ID mapped to this RQ */
5262 leaf_prof = aq_rsp.rq.band_prof_id;
5263
5264 ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
5265 ipolicer->match_id[leaf_prof] = match_id;
5266
5267 /* Check if any other leaf profile is marked with same match_id */
5268 for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
5269 if (idx == leaf_prof)
5270 continue;
5271 if (ipolicer->match_id[idx] != match_id)
5272 continue;
5273
5274 leaf_match = idx;
5275 break;
5276 }
5277
5278 if (idx == ipolicer->band_prof.max)
5279 return 0;
5280
5281 /* Fetch the matching profile's context to check if it's already
5282 * mapped to a mid level profile.
5283 */
5284 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5285 NIX_AQ_CTYPE_BANDPROF, leaf_match);
5286 if (rc) {
5287 dev_err(rvu->dev,
5288 "%s: Failed to fetch context of leaf profile %d\n",
5289 __func__, leaf_match);
5290 return rc;
5291 }
5292
5293 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
5294 if (aq_rsp.prof.hl_en) {
5295 		/* Get the mid layer profile index and map leaf_prof to it
5296 		 * as well, so that flows that are being steered to
5297 		 * different RQs but marked with the same match_id are
5298 		 * rate limited in an aggregate fashion.
5299 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
			goto exit;
		}

		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]++;
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}

	/* Allocate a mid layer profile and
	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
	 */
	mutex_lock(&rvu->rsrc_lock);
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		rc = mid_prof; /* propagate the allocation error */
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
	ipolicer->pfvf_map[mid_prof] = 0x00;
	ipolicer->ref_count[mid_prof] = 0;

	/* Initialize mid layer profile same as 'leaf_prof' */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		goto exit;
	}

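	/* For band profile AQ operations, qidx presumably encodes the
	 * profile index in bits [13:0] and the layer in bits [15:14].
	 */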
	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req.hdr.pcifunc = 0x00;
	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_WRITE;
	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
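	/* An all-ones mask makes the WRITE op update every field of the
	 * mid profile's context with the leaf profile's values.
	 */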
	memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
	/* Clear higher layer enable bit in the mid profile, just in case */
	aq_req.prof.hl_en = 0;
	aq_req.prof_mask.hl_en = 1;

	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
		goto exit;
	}

	/* Map both leaf profiles to this mid layer profile */
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
		/* Drop the reference taken above, under rsrc_lock like
		 * every other ref_count update.
		 */
		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]--;
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

exit:
	return rc;
}

/* Called with rvu->rsrc_lock held */
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	u16 mid_prof;
	int rc;

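	/* Drop rsrc_lock across the AQ context read and retake it after,
	 * presumably to avoid holding the lock over the slow admin queue
	 * operation.
	 */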
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);

	mutex_lock(&rvu->rsrc_lock);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		return;
	}

	if (!aq_rsp.prof.hl_en)
		return;

	mid_prof = aq_rsp.prof.band_prof_id;
	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	ipolicer->ref_count[mid_prof]--;
	/* If ref_count is zero, free mid layer profile */
	if (!ipolicer->ref_count[mid_prof]) {
		ipolicer->pfvf_map[mid_prof] = 0x00;
		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
	}
}

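/* Mbox handler: reports the number of free bandwidth profiles at each
 * policer layer and the policer timeunit in nanoseconds.
 */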
int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
					     struct nix_bandprof_get_hwinfo_rsp *rsp)
{
	struct nix_ipolicer *ipolicer;
	int blkaddr, layer, err;
	struct nix_hw *nix_hw;
	u64 tu;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	/* Return number of bandwidth profiles free at each layer */
	mutex_lock(&rvu->rsrc_lock);
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Set the policer timeunit in nanosec; NIX_AF_PL_TS[9:0] presumably
	 * holds (timeunit / 100ns) - 1, hence the (tu + 1) * 100 conversion
	 * (e.g. a raw value of 9 yields 1000ns).
	 */
	tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
	rsp->policer_timeunit = (tu + 1) * 100;

	return 0;
}
