// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"
#include "ptp.h"

#include "rvu_trace.h"

#define DRV_NAME	"rvu_af"
#define DRV_STRING	"Marvell OcteonTX2 RVU Admin Function Driver"

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf);
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);

static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *));
enum {
	TYPE_AFVF,
	TYPE_AFPF,
};

/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, rvu_id_table);

static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");

static char *kpu_profile; /* KPU profile name */
module_param(kpu_profile, charp, 0000);
MODULE_PARM_DESC(kpu_profile, "KPU profile name string");

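/* Set the HW capability flags the rest of the AF driver keys off of.
 * Defaults describe silicon with flexible TXSCHQ mapping and shaping
 * support; pre-96xx C0 silicon gets fixed TXSCHQ mapping and loses
 * shaping and TX link backpressure support.
 */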
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
	hw->cap.nix_fixed_txschq_mapping = false;
	hw->cap.nix_shaping = true;
	hw->cap.nix_tx_link_bp = true;
	hw->cap.nix_rx_multicast = true;
	hw->cap.nix_shaper_toggle_wait = false;
	hw->rvu = rvu;

	if (is_rvu_pre_96xx_C0(rvu)) {
		hw->cap.nix_fixed_txschq_mapping = true;
		hw->cap.nix_txsch_per_cgx_lmac = 4;
		hw->cap.nix_txsch_per_lbk_lmac = 132;
		hw->cap.nix_txsch_per_sdp_lmac = 76;
		hw->cap.nix_shaping = false;
		hw->cap.nix_tx_link_bp = false;
		if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
			hw->cap.nix_rx_multicast = false;
	}
	if (!is_rvu_pre_96xx_C0(rvu))
		hw->cap.nix_shaper_toggle_wait = true;

	if (!is_rvu_otx2(rvu))
		hw->cap.per_pf_mbox_regs = true;
}

/* Poll an RVU block's register 'offset' for a 'zero' or 'nonzero'
 * value at the bits specified by 'mask'.
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
	bool twice = false;
	void __iomem *reg;
	u64 reg_val;

	reg = rvu->afreg_base + ((block << 28) | offset);
again:
	reg_val = readq(reg);
	if (zero && !(reg_val & mask))
		return 0;
	if (!zero && (reg_val & mask))
		return 0;
	if (time_before(jiffies, timeout)) {
		usleep_range(1, 5);
		goto again;
	}
	/* In scenarios where the CPU is scheduled out before checking
	 * 'time_before' (above) and gets scheduled back in only after
	 * jiffies has crossed the timeout value, check once more whether
	 * HW completed the operation in the meantime.
	 */
	if (!twice) {
		twice = true;
		goto again;
	}
	return -EBUSY;
}

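/* Allocate one free entry from a resource bitmap and mark it used.
 * Returns the allocated index, or -ENOSPC if the bitmap is full.
 */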
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
	int id;

	if (!rsrc->bmap)
		return -EINVAL;

	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
	if (id >= rsrc->max)
		return -ENOSPC;

	__set_bit(id, rsrc->bmap);

	return id;
}

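/* Allocate 'nrsrc' contiguous entries from a resource bitmap.
 * Returns the starting index of the allocated range.
 */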
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return -EINVAL;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return -ENOSPC;

	bitmap_set(rsrc->bmap, start, nrsrc);
	return start;
}

static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
	if (!rsrc->bmap)
		return;
	if (start >= rsrc->max)
		return;

	bitmap_clear(rsrc->bmap, start, nrsrc);
}

bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return false;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return false;

	return true;
}

void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return;

	__clear_bit(id, rsrc->bmap);
}

int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
{
	int used;

	if (!rsrc->bmap)
		return 0;

	used = bitmap_weight(rsrc->bmap, rsrc->max);
	return (rsrc->max - used);
}

bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return false;

	return !test_bit(id, rsrc->bmap);
}

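/* Allocate backing storage for a resource bitmap of 'rsrc->max' bits */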
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
			     sizeof(long), GFP_KERNEL);
	if (!rsrc->bmap)
		return -ENOMEM;
	return 0;
}

void rvu_free_bitmap(struct rsrc_bmap *rsrc)
{
	kfree(rsrc->bmap);
}

/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
	u16 match = 0;
	int lf;

	mutex_lock(&rvu->rsrc_lock);
	for (lf = 0; lf < block->lf.max; lf++) {
		if (block->fn_map[lf] == pcifunc) {
			if (slot == match) {
				mutex_unlock(&rvu->rsrc_lock);
				return lf;
			}
			match++;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return -ENODEV;
}

/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 support
 * multiple blocks of the same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 *
 * If a pcifunc has LFs attached from multiple blocks of the same type,
 * return the blkaddr of the first encountered block.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
	int devnum, blkaddr = -ENODEV;
	u64 cfg, reg;
	bool is_pf;

	switch (blktype) {
	case BLKTYPE_NPC:
		blkaddr = BLKADDR_NPC;
		goto exit;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		goto exit;
	case BLKTYPE_NIX:
		/* For now assume NIX0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}
		break;
	case BLKTYPE_SSO:
		blkaddr = BLKADDR_SSO;
		goto exit;
	case BLKTYPE_SSOW:
		blkaddr = BLKADDR_SSOW;
		goto exit;
	case BLKTYPE_TIM:
		blkaddr = BLKADDR_TIM;
		goto exit;
	case BLKTYPE_CPT:
		/* For now assume CPT0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}
		break;
	}

	/* Check if this is an RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
	 * 'BLKADDR_NIX1'.
	 */
	if (blktype == BLKTYPE_NIX) {
		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
			RVU_PRIV_HWVFX_NIXX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
			RVU_PRIV_HWVFX_NIXX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_NIX1;
	}

	if (blktype == BLKTYPE_CPT) {
		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
			RVU_PRIV_HWVFX_CPTX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
			RVU_PRIV_HWVFX_CPTX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_CPT1;
	}

exit:
	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;
	return -ENODEV;
}

static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, u16 pcifunc,
				u16 lf, bool attach)
{
	int devnum, num_lfs = 0;
	bool is_pf;
	u64 reg;

	if (lf >= block->lf.max) {
		dev_err(&rvu->pdev->dev,
			"%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
			__func__, lf, block->name, block->lf.max);
		return;
	}

	/* Check if this is for an RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	block->fn_map[lf] = attach ? pcifunc : 0;

	switch (block->addr) {
	case BLKADDR_NPA:
		pfvf->npalf = attach ? true : false;
		num_lfs = pfvf->npalf;
		break;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		pfvf->nixlf = attach ? true : false;
		num_lfs = pfvf->nixlf;
		break;
	case BLKADDR_SSO:
		attach ? pfvf->sso++ : pfvf->sso--;
		num_lfs = pfvf->sso;
		break;
	case BLKADDR_SSOW:
		attach ? pfvf->ssow++ : pfvf->ssow--;
		num_lfs = pfvf->ssow;
		break;
	case BLKADDR_TIM:
		attach ? pfvf->timlfs++ : pfvf->timlfs--;
		num_lfs = pfvf->timlfs;
		break;
	case BLKADDR_CPT0:
		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
		num_lfs = pfvf->cptlfs;
		break;
	case BLKADDR_CPT1:
		attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
		num_lfs = pfvf->cpt1_lfs;
		break;
	}

	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}

inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
	u64 cfg;

	/* Get numVFs attached to this PF and first HWVF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	if (numvfs)
		*numvfs = (cfg >> 12) & 0xFF;
	if (hwvf)
		*hwvf = cfg & 0xFFF;
}

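/* Map a pcifunc to its global HWVF index: the parent PF's first HWVF
 * plus the VF's zero-based number. Only meaningful when 'pcifunc' has
 * a nonzero FUNC, i.e. refers to a VF.
 */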
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
	int pf, func;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	func = pcifunc & RVU_PFVF_FUNC_MASK;

	/* Get first HWVF attached to this PF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

	return ((cfg & 0xFFF) + func - 1);
}

struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
	/* Check if it is a PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
	else
		return &rvu->pf[rvu_get_pf(pcifunc)];
}

static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
{
	int pf, vf, nvfs;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (pf >= rvu->hw->total_pfs)
		return false;

	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		return true;

	/* Check if VF is within number of VFs attached to this PF */
	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	nvfs = (cfg >> 12) & 0xFF;
	if (vf >= nvfs)
		return false;

	return true;
}

bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
	struct rvu_block *block;

	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
		return false;

	block = &hw->block[blkaddr];
	return block->implemented;
}

static void rvu_check_block_implemented(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* For each block check if 'implemented' bit is set */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
		if (cfg & BIT_ULL(11))
			block->implemented = true;
	}
}

static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
		    RVU_BLK_RVUM_REVID);
}

static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}

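/* Reset an LF: write the LF number with the reset trigger bit (bit 12)
 * set into the block's LF reset register, then poll until HW clears it.
 */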
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
	int err;

	if (!block->implemented)
		return 0;

	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
			   true);
	return err;
}

static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
	struct rvu_block *block = &rvu->hw->block[blkaddr];
	int err;

	if (!block->implemented)
		return;

	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
	if (err) {
		dev_err(rvu->dev, "HW block:%d reset timeout retrying again\n", blkaddr);
		while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
			;
	}
}

static void rvu_reset_all_blocks(struct rvu *rvu)
{
	/* Do a HW reset of all RVU blocks */
	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}

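/* Scan a block's per-LF config registers to find LFs that firmware has
 * already provisioned; mark them used and rebuild the SW mapping state.
 */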
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
	struct rvu_pfvf *pfvf;
	u64 cfg;
	int lf;

	for (lf = 0; lf < block->lf.max; lf++) {
		cfg = rvu_read64(rvu, block->addr,
				 block->lfcfg_reg | (lf << block->lfshift));
		if (!(cfg & BIT_ULL(63)))
			continue;

		/* Set this resource as being used */
		__set_bit(lf, block->lf.bmap);

		/* Get, to whom this LF is attached */
		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    (cfg >> 8) & 0xFFFF, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}

static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
	int min_vecs;

	if (!vf)
		goto check_pf;

	if (!nvecs) {
		dev_warn(rvu->dev,
			 "PF%d:VF%d is configured with zero msix vectors, %d\n",
			 pf, vf - 1, nvecs);
	}
	return;

check_pf:
	if (pf == 0)
		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
	else
		min_vecs = RVU_PF_INT_VEC_CNT;

	if (!(nvecs < min_vecs))
		return;
	dev_warn(rvu->dev,
		 "PF%d is configured with too few vectors, %d, min is %d\n",
		 pf, nvecs, min_vecs);
}

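/* Discover MSIX vector counts for each enabled PF and its VFs, allocate
 * tracking bitmaps, program vector offsets for the PF/VF interrupt
 * vectors, and create an IOMMU mapping for the MSIX table base.
 */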
static int rvu_setup_msix_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf, err;
	int nvecs, offset, max_msix;
	struct rvu_pfvf *pfvf;
	u64 cfg, phy_addr;
	dma_addr_t iova;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

		pfvf = &rvu->pf[pf];
		/* Get num of MSIX vectors attached to this PF */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

		/* Alloc msix bitmap for this PF */
		err = rvu_alloc_bitmap(&pfvf->msix);
		if (err)
			return err;

		/* Allocate memory for MSIX vector to RVU block LF mapping */
		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
						sizeof(u16), GFP_KERNEL);
		if (!pfvf->msix_lfmap)
			return -ENOMEM;

		/* For PF0 (AF) firmware will set msix vector offsets for
		 * AF, block AF and PF0_INT vectors, so jump to VFs.
		 */
		if (!pf)
			goto setup_vfmsix;

		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
		 * These are allocated on driver init and never freed,
		 * so no need to set 'msix_lfmap' for these.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
		nvecs = (cfg >> 12) & 0xFF;
		cfg &= ~0x7FFULL;
		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
		rvu_write64(rvu, BLKADDR_RVUM,
			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
		/* Alloc msix bitmap for VFs */
		for (vf = 0; vf < numvfs; vf++) {
			pfvf = &rvu->hwvf[hwvf + vf];
			/* Get num of MSIX vectors attached to this VF */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_PFX_MSIX_CFG(pf));
			pfvf->msix.max = (cfg & 0xFFF) + 1;
			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

			/* Alloc msix bitmap for this VF */
			err = rvu_alloc_bitmap(&pfvf->msix);
			if (err)
				return err;

			pfvf->msix_lfmap =
				devm_kcalloc(rvu->dev, pfvf->msix.max,
					     sizeof(u16), GFP_KERNEL);
			if (!pfvf->msix_lfmap)
				return -ENOMEM;

			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
			 * These are allocated on driver init and never freed,
			 * so no need to set 'msix_lfmap' for these.
			 */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
			nvecs = (cfg >> 12) & 0xFF;
			cfg &= ~0x7FFULL;
			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
			rvu_write64(rvu, BLKADDR_RVUM,
				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
				    cfg | offset);
		}
	}

	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
	 * create an IOMMU mapping for the physical address configured by
	 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
	 */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	if (rvu->fwdata && rvu->fwdata->msixtr_base)
		phy_addr = rvu->fwdata->msixtr_base;
	else
		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);

	iova = dma_map_resource(rvu->dev, phy_addr,
				max_msix * PCI_MSIX_ENTRY_SIZE,
				DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(rvu->dev, iova))
		return -ENOMEM;

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
	rvu->msix_base_iova = iova;
	rvu->msixtr_base_phy = phy_addr;

	return 0;
}

static void rvu_reset_msix(struct rvu *rvu)
{
	/* Restore msixtr base register */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
		    rvu->msixtr_base_phy);
}

static void rvu_free_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int id, max_msix;
	u64 cfg;

	rvu_npa_freemem(rvu);
	rvu_npc_freemem(rvu);
	rvu_nix_freemem(rvu);

	/* Free block LF bitmaps */
	for (id = 0; id < BLK_COUNT; id++) {
		block = &hw->block[id];
		kfree(block->lf.bmap);
	}

	/* Free MSIX bitmaps */
	for (id = 0; id < hw->total_pfs; id++) {
		pfvf = &rvu->pf[id];
		kfree(pfvf->msix.bmap);
	}

	for (id = 0; id < hw->total_vfs; id++) {
		pfvf = &rvu->hwvf[id];
		kfree(pfvf->msix.bmap);
	}

	/* Unmap MSIX vector base IOVA mapping */
	if (!rvu->msix_base_iova)
		return;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
			   max_msix * PCI_MSIX_ENTRY_SIZE,
			   DMA_BIDIRECTIONAL, 0);

	rvu_reset_msix(rvu);
	mutex_destroy(&rvu->rsrc_lock);
}

static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf;
	struct rvu_pfvf *pfvf;
	u64 *mac;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		/* For PF0 (AF), assign MAC addresses only to its VFs (LBK VFs) */
		if (!pf)
			goto lbkvf;

		if (!is_pf_cgxmapped(rvu, pf))
			continue;
		/* Assign MAC address to PF */
		pfvf = &rvu->pf[pf];
		if (rvu->fwdata && pf < PF_MACNUM_MAX) {
			mac = &rvu->fwdata->pf_macs[pf];
			if (*mac)
				u64_to_ether_addr(*mac, pfvf->mac_addr);
			else
				eth_random_addr(pfvf->mac_addr);
		} else {
			eth_random_addr(pfvf->mac_addr);
		}
		ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);

lbkvf:
		/* Assign MAC addresses to VFs */
		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++, hwvf++) {
			pfvf = &rvu->hwvf[hwvf];
			if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
				mac = &rvu->fwdata->vf_macs[hwvf];
				if (*mac)
					u64_to_ether_addr(*mac, pfvf->mac_addr);
				else
					eth_random_addr(pfvf->mac_addr);
			} else {
				eth_random_addr(pfvf->mac_addr);
			}
			ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
		}
	}
}

static int rvu_fwdata_init(struct rvu *rvu)
{
	u64 fwdbase;
	int err;

	/* Get firmware data base address */
	err = cgx_get_fwdata_base(&fwdbase);
	if (err)
		goto fail;
	rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
	if (!rvu->fwdata)
		goto fail;
	if (!is_rvu_fwdata_valid(rvu)) {
		dev_err(rvu->dev,
			"Mismatch in 'fwdata' struct between kernel and firmware\n");
		iounmap(rvu->fwdata);
		rvu->fwdata = NULL;
		return -EINVAL;
	}
	return 0;
fail:
	dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
	return -EIO;
}

static void rvu_fwdata_exit(struct rvu *rvu)
{
	if (rvu->fwdata)
		iounmap(rvu->fwdata);
}

static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init NIX LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	block->lf.max = cfg & 0xFFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_NIX;
	block->lfshift = 8;
	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NIX_AF_LF_RST;
	block->rvu = rvu;
	sprintf(block->name, "NIX%d", blkid);
	rvu->nix_blkaddr[blkid] = blkaddr;
	return rvu_alloc_bitmap(&block->lf);
}

static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init CPT LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
	block->lf.max = cfg & 0xFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_CPT;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
	block->lfreset_reg = CPT_AF_LF_RST;
	block->rvu = rvu;
	sprintf(block->name, "CPT%d", blkid);
	return rvu_alloc_bitmap(&block->lf);
}

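/* Read the loopback (LBK) block's CONST register via its BAR0 mapping
 * and cache the FIFO buffer size; silently skip if no LBK device exists.
 */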
static void rvu_get_lbk_bufsize(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVID_OCTEONTX2_LBK, pdev);
	if (!pdev)
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	lbk_const = readq(base + LBK_CONST);

	/* cache fifo size */
	rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);

	iounmap(base);
err_put:
	pci_dev_put(pdev);
}

static int rvu_setup_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid, err;
	u64 cfg;

	/* Get HW supported max RVU PF & VF count */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	hw->total_pfs = (cfg >> 32) & 0xFF;
	hw->total_vfs = (cfg >> 20) & 0xFFF;
	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

	/* Init NPA LF's bitmap */
	block = &hw->block[BLKADDR_NPA];
	if (!block->implemented)
		goto nix;
	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
	block->lf.max = (cfg >> 16) & 0xFFF;
	block->addr = BLKADDR_NPA;
	block->type = BLKTYPE_NPA;
	block->lfshift = 8;
	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NPA_AF_LF_RST;
	block->rvu = rvu;
	sprintf(block->name, "NPA");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NPA LF bitmap\n", __func__);
		return err;
	}

nix:
	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
		return err;
	}

	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
		return err;
	}

	/* Init SSO group's bitmap */
	block = &hw->block[BLKADDR_SSO];
	if (!block->implemented)
		goto ssow;
	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_SSO;
	block->type = BLKTYPE_SSO;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
	block->rvu = rvu;
	sprintf(block->name, "SSO GROUP");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate SSO LF bitmap\n", __func__);
		return err;
	}

ssow:
	/* Init SSO workslot's bitmap */
	block = &hw->block[BLKADDR_SSOW];
	if (!block->implemented)
		goto tim;
	block->lf.max = (cfg >> 56) & 0xFF;
	block->addr = BLKADDR_SSOW;
	block->type = BLKTYPE_SSOW;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
	block->rvu = rvu;
	sprintf(block->name, "SSOWS");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate SSOW LF bitmap\n", __func__);
		return err;
	}

tim:
	/* Init TIM LF's bitmap */
	block = &hw->block[BLKADDR_TIM];
	if (!block->implemented)
		goto cpt;
	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_TIM;
	block->type = BLKTYPE_TIM;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
	block->lfreset_reg = TIM_AF_LF_RST;
	block->rvu = rvu;
	sprintf(block->name, "TIM");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate TIM LF bitmap\n", __func__);
		return err;
	}

cpt:
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate CPT0 LF bitmap\n", __func__);
		return err;
	}
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate CPT1 LF bitmap\n", __func__);
		return err;
	}

	/* Allocate memory for PFVF data */
	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
			       sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->pf) {
		dev_err(rvu->dev,
			"%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
		return -ENOMEM;
	}

	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
				 sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->hwvf) {
		dev_err(rvu->dev,
			"%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
		return -ENOMEM;
	}

	mutex_init(&rvu->rsrc_lock);

	rvu_fwdata_init(rvu);

	err = rvu_setup_msix_resources(rvu);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to setup MSIX resources\n", __func__);
		return err;
	}

	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;

		/* Allocate memory for block LF/slot to pcifunc mapping info */
		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
					     sizeof(u16), GFP_KERNEL);
		if (!block->fn_map) {
			err = -ENOMEM;
			goto msix_err;
		}

		/* Scan all blocks to check if low level firmware has
		 * already provisioned any of the resources to a PF/VF.
		 */
		rvu_scan_block(rvu, block);
	}

	err = rvu_set_channels_base(rvu);
	if (err)
		goto msix_err;

	err = rvu_npc_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
		goto npc_err;
	}

	err = rvu_cgx_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
		goto cgx_err;
	}

	/* Assign MACs for CGX mapped functions */
	rvu_setup_pfvf_macaddress(rvu);

	err = rvu_npa_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
		goto npa_err;
	}

	rvu_get_lbk_bufsize(rvu);

	err = rvu_nix_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
		goto nix_err;
	}

	err = rvu_sdp_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
		goto nix_err;
	}

	rvu_program_channels(rvu);

	return 0;

nix_err:
	rvu_nix_freemem(rvu);
npa_err:
	rvu_npa_freemem(rvu);
cgx_err:
	rvu_cgx_exit(rvu);
npc_err:
	rvu_npc_freemem(rvu);
	rvu_fwdata_exit(rvu);
msix_err:
	rvu_reset_msix(rvu);
	return err;
}

/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
	if (!aq)
		return;

	qmem_free(rvu->dev, aq->inst);
	qmem_free(rvu->dev, aq->res);
	devm_kfree(rvu->dev, aq);
}

int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
		 int qsize, int inst_size, int res_size)
{
	struct admin_queue *aq;
	int err;

	*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
	if (!*ad_queue)
		return -ENOMEM;
	aq = *ad_queue;

	/* Alloc memory for instructions, i.e. the AQ */
	err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
	if (err) {
		devm_kfree(rvu->dev, aq);
		return err;
	}

	/* Alloc memory for results */
	err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
	if (err) {
		rvu_aq_free(rvu, aq);
		return err;
	}

	spin_lock_init(&aq->lock);
	return 0;
}

int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
			   struct ready_msg_rsp *rsp)
{
	if (rvu->fwdata) {
		rsp->rclk_freq = rvu->fwdata->rclk;
		rsp->sclk_freq = rvu->fwdata->sclk;
	}
	return 0;
}

/* Get the current count of an RVU block's LFs/slots
 * provisioned to a given RVU func.
 */
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
{
	switch (blkaddr) {
	case BLKADDR_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		return pfvf->nixlf ? 1 : 0;
	case BLKADDR_SSO:
		return pfvf->sso;
	case BLKADDR_SSOW:
		return pfvf->ssow;
	case BLKADDR_TIM:
		return pfvf->timlfs;
	case BLKADDR_CPT0:
		return pfvf->cptlfs;
	case BLKADDR_CPT1:
		return pfvf->cpt1_lfs;
	}
	return 0;
}

/* Return true if LFs of block type are attached to pcifunc */
static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
{
	switch (blktype) {
	case BLKTYPE_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKTYPE_NIX:
		return pfvf->nixlf ? 1 : 0;
	case BLKTYPE_SSO:
		return !!pfvf->sso;
	case BLKTYPE_SSOW:
		return !!pfvf->ssow;
	case BLKTYPE_TIM:
		return !!pfvf->timlfs;
	case BLKTYPE_CPT:
		return pfvf->cptlfs || pfvf->cpt1_lfs;
	}

	return false;
}

bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf;

	if (!is_pf_func_valid(rvu, pcifunc))
		return false;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Check if this PFFUNC has an LF of type blktype attached */
	if (!is_blktype_attached(pfvf, blktype))
		return false;

	return true;
}

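/* Use the block's LF lookup (debug) register to translate a
 * (pcifunc, slot) pair into the LF's HW index. Returns -1 if the
 * lookup reports no valid LF.
 */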
static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
			   int pcifunc, int slot)
{
	u64 val;

	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
	rvu_write64(rvu, block->addr, block->lookup_reg, val);
	/* Wait for the lookup to finish */
	/* TODO: put some timeout here */
	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
		;

	val = rvu_read64(rvu, block->addr, block->lookup_reg);

	/* Check LF valid bit */
	if (!(val & (1ULL << 12)))
		return -1;

	return (val & 0xFFF);
}

int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
			      u16 global_slot, u16 *slot_in_block)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int numlfs, total_lfs = 0, nr_blocks = 0;
	int i, num_blkaddr[BLK_COUNT] = { 0 };
	struct rvu_block *block;
	int blkaddr;
	u16 start_slot;

	if (!is_blktype_attached(pfvf, blktype))
		return -ENODEV;

	/* Get all the block addresses from which LFs are attached to
	 * the given pcifunc in num_blkaddr[].
	 */
	for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
		block = &rvu->hw->block[blkaddr];
		if (block->type != blktype)
			continue;
		if (!is_block_implemented(rvu->hw, blkaddr))
			continue;

		numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
		if (numlfs) {
			total_lfs += numlfs;
			num_blkaddr[nr_blocks] = blkaddr;
			nr_blocks++;
		}
	}

	if (global_slot >= total_lfs)
		return -ENODEV;

	/* Based on the given global slot number retrieve the
	 * correct block address out of all attached block
	 * addresses and slot number in that block.
	 */
	total_lfs = 0;
	blkaddr = -ENODEV;
	for (i = 0; i < nr_blocks; i++) {
		numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
		total_lfs += numlfs;
		if (global_slot < total_lfs) {
			blkaddr = num_blkaddr[i];
			start_slot = total_lfs - numlfs;
			*slot_in_block = global_slot - start_slot;
			break;
		}
	}

	return blkaddr;
}

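/* Detach all LFs of 'blktype' from a pcifunc: disable each LF, update
 * the SW resource map, free the LF and clear its MSIX vector offsets.
 */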
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
	if (blkaddr < 0)
		return;

	if (blktype == BLKTYPE_NIX)
		rvu_nix_reset_mac(pfvf, pcifunc);

	block = &hw->block[blkaddr];

	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
	if (!num_lfs)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
		if (lf < 0) /* This should never happen */
			continue;

		/* Disable the LF */
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), 0x00ULL);

		/* Update SW maintained mapping info as well */
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, false);

		/* Free the resource */
		rvu_free_rsrc(&block->lf, lf);

		/* Clear MSIX vector offset for this LF */
		rvu_clear_msix_offset(rvu, pfvf, block, lf);
	}
}

static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
			    u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	bool detach_all = true;
	struct rvu_block *block;
	int blkid;

	mutex_lock(&rvu->rsrc_lock);

	/* Check for partial resource detach */
	if (detach && detach->partial)
		detach_all = false;

	/* Check for RVU block's LFs attached to this func;
	 * if so, detach them.
	 */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;
		if (!detach_all && detach) {
			if (blkid == BLKADDR_NPA && !detach->npalf)
				continue;
			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_SSO) && !detach->sso)
				continue;
			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
				continue;
			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
				continue;
			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
				continue;
			else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
				continue;
		}
		rvu_detach_block(rvu, pcifunc, block->type);
	}

	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int rvu_mbox_handler_detach_resources(struct rvu *rvu,
				      struct rsrc_detach *detach,
				      struct msg_rsp *rsp)
{
	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}

int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr = BLKADDR_NIX0, vf;
	struct rvu_pfvf *pf;

	pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);

	/* All CGX mapped PFs are set with assigned NIX block during init */
	if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
		blkaddr = pf->nix_blkaddr;
	} else if (is_afvf(pcifunc)) {
		vf = pcifunc - 1;
		/* Assign NIX based on VF number. All even numbered VFs get
		 * NIX0 and odd numbered ones get NIX1.
		 */
		blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
		/* NIX1 is not present on all silicons */
		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			blkaddr = BLKADDR_NIX0;
	}

	/* if SDP1 then the blkaddr is NIX1 */
	if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
		blkaddr = BLKADDR_NIX1;

	switch (blkaddr) {
	case BLKADDR_NIX1:
		pfvf->nix_blkaddr = BLKADDR_NIX1;
		pfvf->nix_rx_intf = NIX_INTFX_RX(1);
		pfvf->nix_tx_intf = NIX_INTFX_TX(1);
		break;
	case BLKADDR_NIX0:
	default:
		pfvf->nix_blkaddr = BLKADDR_NIX0;
		pfvf->nix_rx_intf = NIX_INTFX_RX(0);
		pfvf->nix_tx_intf = NIX_INTFX_TX(0);
		break;
	}

	return pfvf->nix_blkaddr;
}

static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
				  u16 pcifunc, struct rsrc_attach *attach)
{
	int blkaddr;

	switch (blktype) {
	case BLKTYPE_NIX:
		blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
		break;
	case BLKTYPE_CPT:
		if (attach->hdr.ver < RVU_MULTI_BLK_VER)
			return rvu_get_blkaddr(rvu, blktype, 0);
		blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
			BLKADDR_CPT0;
		if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
			return -ENODEV;
		break;
	default:
		return rvu_get_blkaddr(rvu, blktype, 0);
	}

	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;

	return -ENODEV;
}

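/* Attach 'num_lfs' LFs of the given block type to a pcifunc: allocate
 * each LF, bind it to the pcifunc/slot via the LF config register and
 * assign its MSIX vector offsets.
 */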
static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
			     int num_lfs, struct rsrc_attach *attach)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf;
	int blkaddr;
	u64 cfg;

	if (!num_lfs)
		return;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		/* Allocate the resource */
		lf = rvu_alloc_rsrc(&block->lf);
		if (lf < 0)
			return;

		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), cfg);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}

static int rvu_check_rsrc_availability(struct rvu *rvu,
				       struct rsrc_attach *req, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int free_lfs, mappedlfs, blkaddr;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;

	/* Only one NPA LF can be attached */
	if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
		block = &hw->block[BLKADDR_NPA];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->npalf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NPA\n",
			pcifunc);
		return -EINVAL;
	}

	/* Only one NIX LF can be attached */
	if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
						 pcifunc, req);
		if (blkaddr < 0)
			return blkaddr;
		block = &hw->block[blkaddr];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->nixlf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NIX\n",
			pcifunc);
		return -EINVAL;
	}

	if (req->sso) {
		block = &hw->block[BLKADDR_SSO];
		/* Is request within limits ? */
		if (req->sso > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSO req, %d > max %d\n",
				pcifunc, req->sso, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		/* Check if additional resources are available */
		if (req->sso > mappedlfs &&
		    ((req->sso - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->ssow) {
		block = &hw->block[BLKADDR_SSOW];
		if (req->ssow > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
				pcifunc, req->ssow, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->ssow > mappedlfs &&
		    ((req->ssow - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->timlfs) {
		block = &hw->block[BLKADDR_TIM];
		if (req->timlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
				pcifunc, req->timlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->timlfs > mappedlfs &&
		    ((req->timlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->cptlfs) {
		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
						 pcifunc, req);
		if (blkaddr < 0)
			return blkaddr;
		block = &hw->block[blkaddr];
		if (req->cptlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
				pcifunc, req->cptlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->cptlfs > mappedlfs &&
		    ((req->cptlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	return 0;

fail:
	dev_info(rvu->dev, "Request for %s failed\n", block->name);
	return -ENOSPC;
}

static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
				       struct rsrc_attach *attach)
{
	int blkaddr, num_lfs;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
					 attach->hdr.pcifunc, attach);
	if (blkaddr < 0)
		return false;

	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
					blkaddr);
	/* Requester already has LFs from given block ? */
	return !!num_lfs;
}

int rvu_mbox_handler_attach_resources(struct rvu *rvu,
				      struct rsrc_attach *attach,
				      struct msg_rsp *rsp)
{
	u16 pcifunc = attach->hdr.pcifunc;
	int err;

	/* If first request, detach all existing attached resources */
	if (!attach->modify)
		rvu_detach_rsrcs(rvu, NULL, pcifunc);

	mutex_lock(&rvu->rsrc_lock);

	/* Check if the request can be accommodated */
	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
	if (err)
		goto exit;

	/* Now attach the requested resources */
	if (attach->npalf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);

	if (attach->nixlf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);

	if (attach->sso) {
		/* An RVU func doesn't know which exact LF or slot is
		 * attached to it; it always sees them as slots 0, 1, 2.
		 * So for a 'modify' request, simply detach all existing
		 * attached LFs/slots and attach afresh.
		 */
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
				 attach->sso, attach);
	}

	if (attach->ssow) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
				 attach->ssow, attach);
	}

	if (attach->timlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
				 attach->timlfs, attach);
	}

	if (attach->cptlfs) {
		if (attach->modify &&
		    rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
				 attach->cptlfs, attach);
	}

exit:
	mutex_unlock(&rvu->rsrc_lock);
	return err;
}

static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
			       int blkaddr, int lf)
{
	u16 vec;

	if (lf < 0)
		return MSIX_VECTOR_INVALID;

	for (vec = 0; vec < pfvf->msix.max; vec++) {
		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
			return vec;
	}
	return MSIX_VECTOR_INVALID;
}

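/* Reserve a contiguous range of MSIX vectors for an LF, program the
 * vector offset into the LF's MSIX config register and record each
 * vector's LF ownership in 'msix_lfmap'.
 */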
rvu_set_msix_offset(struct rvu * rvu,struct rvu_pfvf * pfvf,struct rvu_block * block,int lf)1762 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1763 struct rvu_block *block, int lf)
1764 {
1765 u16 nvecs, vec, offset;
1766 u64 cfg;
1767
1768 cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1769 (lf << block->lfshift));
1770 nvecs = (cfg >> 12) & 0xFF;
1771
1772 /* Check and alloc MSIX vectors, must be contiguous */
1773 if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
1774 return;
1775
1776 offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
1777
1778 /* Config MSIX offset in LF */
1779 rvu_write64(rvu, block->addr, block->msixcfg_reg |
1780 (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
1781
1782 /* Update the bitmap as well */
1783 for (vec = 0; vec < nvecs; vec++)
1784 pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
1785 }
1786
rvu_clear_msix_offset(struct rvu * rvu,struct rvu_pfvf * pfvf,struct rvu_block * block,int lf)1787 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1788 struct rvu_block *block, int lf)
1789 {
1790 u16 nvecs, vec, offset;
1791 u64 cfg;
1792
1793 cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1794 (lf << block->lfshift));
1795 nvecs = (cfg >> 12) & 0xFF;
1796
1797 /* Clear MSIX offset in LF */
1798 rvu_write64(rvu, block->addr, block->msixcfg_reg |
1799 (lf << block->lfshift), cfg & ~0x7FFULL);
1800
1801 offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
1802
1803 /* Update the mapping */
1804 for (vec = 0; vec < nvecs; vec++)
1805 pfvf->msix_lfmap[offset + vec] = 0;
1806
1807 /* Free the same in MSIX bitmap */
1808 rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
1809 }
1810
rvu_mbox_handler_msix_offset(struct rvu * rvu,struct msg_req * req,struct msix_offset_rsp * rsp)1811 int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
1812 struct msix_offset_rsp *rsp)
1813 {
1814 struct rvu_hwinfo *hw = rvu->hw;
1815 u16 pcifunc = req->hdr.pcifunc;
1816 struct rvu_pfvf *pfvf;
1817 int lf, slot, blkaddr;
1818
1819 pfvf = rvu_get_pfvf(rvu, pcifunc);
1820 if (!pfvf->msix.bmap)
1821 return 0;
1822
1823 /* Set MSIX offsets for each block's LFs attached to this PF/VF */
1824 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
1825 rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
1826
1827 /* Get BLKADDR from which LFs are attached to pcifunc */
1828 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1829 if (blkaddr < 0) {
1830 rsp->nix_msixoff = MSIX_VECTOR_INVALID;
1831 } else {
1832 lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1833 rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
1834 }
1835
1836 rsp->sso = pfvf->sso;
1837 for (slot = 0; slot < rsp->sso; slot++) {
1838 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
1839 rsp->sso_msixoff[slot] =
1840 rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
1841 }
1842
1843 rsp->ssow = pfvf->ssow;
1844 for (slot = 0; slot < rsp->ssow; slot++) {
1845 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
1846 rsp->ssow_msixoff[slot] =
1847 rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
1848 }
1849
1850 rsp->timlfs = pfvf->timlfs;
1851 for (slot = 0; slot < rsp->timlfs; slot++) {
1852 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
1853 rsp->timlf_msixoff[slot] =
1854 rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
1855 }
1856
1857 rsp->cptlfs = pfvf->cptlfs;
1858 for (slot = 0; slot < rsp->cptlfs; slot++) {
1859 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
1860 rsp->cptlf_msixoff[slot] =
1861 rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
1862 }
1863
1864 rsp->cpt1_lfs = pfvf->cpt1_lfs;
1865 for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
1866 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
1867 rsp->cpt1_lf_msixoff[slot] =
1868 rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
1869 }
1870
1871 return 0;
1872 }
1873
rvu_mbox_handler_free_rsrc_cnt(struct rvu * rvu,struct msg_req * req,struct free_rsrcs_rsp * rsp)1874 int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
1875 struct free_rsrcs_rsp *rsp)
1876 {
1877 struct rvu_hwinfo *hw = rvu->hw;
1878 struct rvu_block *block;
1879 struct nix_txsch *txsch;
1880 struct nix_hw *nix_hw;
1881
1882 mutex_lock(&rvu->rsrc_lock);
1883
1884 block = &hw->block[BLKADDR_NPA];
1885 rsp->npa = rvu_rsrc_free_count(&block->lf);
1886
1887 block = &hw->block[BLKADDR_NIX0];
1888 rsp->nix = rvu_rsrc_free_count(&block->lf);
1889
1890 block = &hw->block[BLKADDR_NIX1];
1891 rsp->nix1 = rvu_rsrc_free_count(&block->lf);
1892
1893 block = &hw->block[BLKADDR_SSO];
1894 rsp->sso = rvu_rsrc_free_count(&block->lf);
1895
1896 block = &hw->block[BLKADDR_SSOW];
1897 rsp->ssow = rvu_rsrc_free_count(&block->lf);
1898
1899 block = &hw->block[BLKADDR_TIM];
1900 rsp->tim = rvu_rsrc_free_count(&block->lf);
1901
1902 block = &hw->block[BLKADDR_CPT0];
1903 rsp->cpt = rvu_rsrc_free_count(&block->lf);
1904
1905 block = &hw->block[BLKADDR_CPT1];
1906 rsp->cpt1 = rvu_rsrc_free_count(&block->lf);
1907
1908 if (rvu->hw->cap.nix_fixed_txschq_mapping) {
1909 rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
1910 rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
1911 rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
1912 rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
1913 /* NIX1 */
1914 if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1915 goto out;
1916 rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
1917 rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
1918 rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
1919 rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
1920 } else {
1921 nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
1922 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1923 rsp->schq[NIX_TXSCH_LVL_SMQ] =
1924 rvu_rsrc_free_count(&txsch->schq);
1925
1926 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
1927 rsp->schq[NIX_TXSCH_LVL_TL4] =
1928 rvu_rsrc_free_count(&txsch->schq);
1929
1930 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
1931 rsp->schq[NIX_TXSCH_LVL_TL3] =
1932 rvu_rsrc_free_count(&txsch->schq);
1933
1934 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
1935 rsp->schq[NIX_TXSCH_LVL_TL2] =
1936 rvu_rsrc_free_count(&txsch->schq);
1937
1938 if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1939 goto out;
1940
1941 nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
1942 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1943 rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
1944 rvu_rsrc_free_count(&txsch->schq);
1945
1946 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
1947 rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
1948 rvu_rsrc_free_count(&txsch->schq);
1949
1950 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
1951 rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
1952 rvu_rsrc_free_count(&txsch->schq);
1953
1954 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
1955 rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
1956 rvu_rsrc_free_count(&txsch->schq);
1957 }
1958
1959 rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
1960 out:
1961 rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
1962 mutex_unlock(&rvu->rsrc_lock);
1963
1964 return 0;
1965 }
1966
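/* Mbox handler: let a PF trigger the FLR cleanup sequence for one of its
 * own VFs; VF ids beyond the PF's configured VF count are rejected.
 */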
1967 int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
1968 struct msg_rsp *rsp)
1969 {
1970 u16 pcifunc = req->hdr.pcifunc;
1971 u16 vf, numvfs;
1972 u64 cfg;
1973
1974 vf = pcifunc & RVU_PFVF_FUNC_MASK;
1975 cfg = rvu_read64(rvu, BLKADDR_RVUM,
1976 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
1977 numvfs = (cfg >> 12) & 0xFF;
1978
1979 if (vf && vf <= numvfs)
1980 __rvu_flr_handler(rvu, pcifunc);
1981 else
1982 return RVU_INVALID_VF_ID;
1983
1984 return 0;
1985 }
1986
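/* Mbox handler: expose NIX scheduling capabilities (fixed txschq mapping,
 * shaping support) to the requesting PF/VF.
 */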
1987 int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
1988 struct get_hw_cap_rsp *rsp)
1989 {
1990 struct rvu_hwinfo *hw = rvu->hw;
1991
1992 rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
1993 rsp->nix_shaping = hw->cap.nix_shaping;
1994
1995 return 0;
1996 }
1997
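/* Mbox handler: let a PF set or reset permissions of one of its VFs; when
 * the trusted flag is cleared, the VF's default allmulti and promisc MCAM
 * entries are disabled.
 */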
1998 int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
1999 struct msg_rsp *rsp)
2000 {
2001 struct rvu_hwinfo *hw = rvu->hw;
2002 u16 pcifunc = req->hdr.pcifunc;
2003 struct rvu_pfvf *pfvf;
2004 int blkaddr, nixlf;
2005 u16 target;
2006
2007 /* Only PF can add VF permissions */
2008 if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
2009 return -EOPNOTSUPP;
2010
2011 target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
2012 pfvf = rvu_get_pfvf(rvu, target);
2013
2014 if (req->flags & RESET_VF_PERM) {
2015 pfvf->flags &= RVU_CLEAR_VF_PERM;
2016 } else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
2017 (req->flags & VF_TRUSTED)) {
2018 change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
2019 /* disable multicast and promisc entries */
2020 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
2021 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
2022 if (blkaddr < 0)
2023 return 0;
2024 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2025 target, 0);
2026 if (nixlf < 0)
2027 return 0;
2028 npc_enadis_default_mce_entry(rvu, target, nixlf,
2029 NIXLF_ALLMULTI_ENTRY,
2030 false);
2031 npc_enadis_default_mce_entry(rvu, target, nixlf,
2032 NIXLF_PROMISC_ENTRY,
2033 false);
2034 }
2035 }
2036
2037 return 0;
2038 }
2039
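/* Dispatch one mbox request to its handler. The M() macro below expands
 * the MBOX_MESSAGES list into one switch case per message ID; e.g. an
 * entry of the form M(READY, 0x001, ready, msg_req, ready_msg_rsp)
 * (illustrative only, see the actual list in mbox.h) becomes a case that
 * allocates the response, stamps its header and calls
 * rvu_mbox_handler_ready().
 */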
2040 static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
2041 struct mbox_msghdr *req)
2042 {
2043 struct rvu *rvu = pci_get_drvdata(mbox->pdev);
2044
2045 /* Check if valid, if not reply with an invalid msg */
2046 if (req->sig != OTX2_MBOX_REQ_SIG)
2047 goto bad_message;
2048
2049 switch (req->id) {
2050 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \
2051 case _id: { \
2052 struct _rsp_type *rsp; \
2053 int err; \
2054 \
2055 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
2056 mbox, devid, \
2057 sizeof(struct _rsp_type)); \
2058 /* some handlers should complete even if reply */ \
2059 /* could not be allocated */ \
2060 if (!rsp && \
2061 _id != MBOX_MSG_DETACH_RESOURCES && \
2062 _id != MBOX_MSG_NIX_TXSCH_FREE && \
2063 _id != MBOX_MSG_VF_FLR) \
2064 return -ENOMEM; \
2065 if (rsp) { \
2066 rsp->hdr.id = _id; \
2067 rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
2068 rsp->hdr.pcifunc = req->pcifunc; \
2069 rsp->hdr.rc = 0; \
2070 } \
2071 \
2072 err = rvu_mbox_handler_ ## _fn_name(rvu, \
2073 (struct _req_type *)req, \
2074 rsp); \
2075 if (rsp && err) \
2076 rsp->hdr.rc = err; \
2077 \
2078 trace_otx2_msg_process(mbox->pdev, _id, err); \
2079 return rsp ? err : -ENOMEM; \
2080 }
2081 MBOX_MESSAGES
2082 #undef M
2083
2084 bad_message:
2085 default:
2086 otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
2087 return -ENODEV;
2088 }
2089 }
2090
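/* Common bottom half for the AF<->PF and AF<->VF request mailboxes: walk
 * every message queued by the IRQ handler, rewrite the sender's pcifunc
 * from the mbox IRQ source (so a PF/VF cannot spoof another function),
 * process each request and finally send the batched responses.
 */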
2091 static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
2092 {
2093 struct rvu *rvu = mwork->rvu;
2094 int offset, err, id, devid;
2095 struct otx2_mbox_dev *mdev;
2096 struct mbox_hdr *req_hdr;
2097 struct mbox_msghdr *msg;
2098 struct mbox_wq_info *mw;
2099 struct otx2_mbox *mbox;
2100
2101 switch (type) {
2102 case TYPE_AFPF:
2103 mw = &rvu->afpf_wq_info;
2104 break;
2105 case TYPE_AFVF:
2106 mw = &rvu->afvf_wq_info;
2107 break;
2108 default:
2109 return;
2110 }
2111
2112 devid = mwork - mw->mbox_wrk;
2113 mbox = &mw->mbox;
2114 mdev = &mbox->dev[devid];
2115
2116 /* Process received mbox messages */
2117 req_hdr = mdev->mbase + mbox->rx_start;
2118 if (mw->mbox_wrk[devid].num_msgs == 0)
2119 return;
2120
2121 offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
2122
2123 for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
2124 msg = mdev->mbase + offset;
2125
2126 /* Set which PF/VF sent this message based on mbox IRQ */
2127 switch (type) {
2128 case TYPE_AFPF:
2129 msg->pcifunc &=
2130 ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
2131 msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
2132 break;
2133 case TYPE_AFVF:
2134 msg->pcifunc &=
2135 ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
2136 msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
2137 break;
2138 }
2139
2140 err = rvu_process_mbox_msg(mbox, devid, msg);
2141 if (!err) {
2142 offset = mbox->rx_start + msg->next_msgoff;
2143 continue;
2144 }
2145
2146 if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
2147 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
2148 err, otx2_mbox_id2name(msg->id),
2149 msg->id, rvu_get_pf(msg->pcifunc),
2150 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2151 else
2152 dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
2153 err, otx2_mbox_id2name(msg->id),
2154 msg->id, devid);
2155 }
2156 mw->mbox_wrk[devid].num_msgs = 0;
2157
2158 /* Send mbox responses to VF/PF */
2159 otx2_mbox_msg_send(mbox, devid);
2160 }
2161
2162 static inline void rvu_afpf_mbox_handler(struct work_struct *work)
2163 {
2164 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2165
2166 __rvu_mbox_handler(mwork, TYPE_AFPF);
2167 }
2168
2169 static inline void rvu_afvf_mbox_handler(struct work_struct *work)
2170 {
2171 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2172
2173 __rvu_mbox_handler(mwork, TYPE_AFVF);
2174 }
2175
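/* Bottom half for the 'up' mailboxes, i.e. responses to notifications the
 * AF itself sent to PFs/VFs: validate each response header and log errors;
 * nothing more is needed beyond acking the messages.
 */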
2176 static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
2177 {
2178 struct rvu *rvu = mwork->rvu;
2179 struct otx2_mbox_dev *mdev;
2180 struct mbox_hdr *rsp_hdr;
2181 struct mbox_msghdr *msg;
2182 struct mbox_wq_info *mw;
2183 struct otx2_mbox *mbox;
2184 int offset, id, devid;
2185
2186 switch (type) {
2187 case TYPE_AFPF:
2188 mw = &rvu->afpf_wq_info;
2189 break;
2190 case TYPE_AFVF:
2191 mw = &rvu->afvf_wq_info;
2192 break;
2193 default:
2194 return;
2195 }
2196
2197 devid = mwork - mw->mbox_wrk_up;
2198 mbox = &mw->mbox_up;
2199 mdev = &mbox->dev[devid];
2200
2201 rsp_hdr = mdev->mbase + mbox->rx_start;
2202 if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
2203 dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
2204 return;
2205 }
2206
2207 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
2208
2209 for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
2210 msg = mdev->mbase + offset;
2211
2212 if (msg->id >= MBOX_MSG_MAX) {
2213 dev_err(rvu->dev,
2214 "Mbox msg with unknown ID 0x%x\n", msg->id);
2215 goto end;
2216 }
2217
2218 if (msg->sig != OTX2_MBOX_RSP_SIG) {
2219 dev_err(rvu->dev,
2220 "Mbox msg with wrong signature %x, ID 0x%x\n",
2221 msg->sig, msg->id);
2222 goto end;
2223 }
2224
2225 switch (msg->id) {
2226 case MBOX_MSG_CGX_LINK_EVENT:
2227 break;
2228 default:
2229 if (msg->rc)
2230 dev_err(rvu->dev,
2231 "Mbox msg response has err %d, ID 0x%x\n",
2232 msg->rc, msg->id);
2233 break;
2234 }
2235 end:
2236 offset = mbox->rx_start + msg->next_msgoff;
2237 mdev->msgs_acked++;
2238 }
2239 mw->mbox_wrk_up[devid].up_num_msgs = 0;
2240
2241 otx2_mbox_reset(mbox, devid);
2242 }
2243
2244 static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
2245 {
2246 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2247
2248 __rvu_mbox_up_handler(mwork, TYPE_AFPF);
2249 }
2250
2251 static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
2252 {
2253 struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2254
2255 __rvu_mbox_up_handler(mwork, TYPE_AFVF);
2256 }
2257
2258 static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
2259 int num, int type)
2260 {
2261 struct rvu_hwinfo *hw = rvu->hw;
2262 int region;
2263 u64 bar4;
2264
2265 /* For the cn10k platform, the VF mailbox regions of a PF follow right
2266 * after the PF <-> AF mailbox region, whereas for OcteonTx2 the region
2267 * address is read from the RVU_PF_VF_BAR4_ADDR register.
2268 */
2269 if (type == TYPE_AFVF) {
2270 for (region = 0; region < num; region++) {
2271 if (hw->cap.per_pf_mbox_regs) {
2272 bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2273 RVU_AF_PFX_BAR4_ADDR(0)) +
2274 MBOX_SIZE;
2275 bar4 += region * MBOX_SIZE;
2276 } else {
2277 bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
2278 bar4 += region * MBOX_SIZE;
2279 }
2280 mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
2281 if (!mbox_addr[region])
2282 goto error;
2283 }
2284 return 0;
2285 }
2286
2287 /* For the cn10k platform, the AF <-> PF mailbox region of a PF is read
2288 * from per-PF registers, whereas for OcteonTx2 it is read from the
2289 * RVU_AF_PF_BAR4_ADDR register.
2290 */
2291 for (region = 0; region < num; region++) {
2292 if (hw->cap.per_pf_mbox_regs) {
2293 bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2294 RVU_AF_PFX_BAR4_ADDR(region));
2295 } else {
2296 bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2297 RVU_AF_PF_BAR4_ADDR);
2298 bar4 += region * MBOX_SIZE;
2299 }
2300 mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
2301 if (!mbox_addr[region])
2302 goto error;
2303 }
2304 return 0;
2305
2306 error:
2307 while (region--)
2308 iounmap((void __iomem *)mbox_addr[region]);
2309 return -ENOMEM;
2310 }
2311
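/* Set up one direction of AF mailboxes: map the per-device mbox memory,
 * create the workqueue and per-device work items, and initialize both the
 * request ('dir') and notification ('dir_up') mbox regions.
 */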
2312 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
2313 int type, int num,
2314 void (mbox_handler)(struct work_struct *),
2315 void (mbox_up_handler)(struct work_struct *))
2316 {
2317 int err = -EINVAL, i, dir, dir_up;
2318 void __iomem *reg_base;
2319 struct rvu_work *mwork;
2320 void **mbox_regions;
2321 const char *name;
2322
2323 mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
2324 if (!mbox_regions)
2325 return -ENOMEM;
2326
2327 switch (type) {
2328 case TYPE_AFPF:
2329 name = "rvu_afpf_mailbox";
2330 dir = MBOX_DIR_AFPF;
2331 dir_up = MBOX_DIR_AFPF_UP;
2332 reg_base = rvu->afreg_base;
2333 err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
2334 if (err)
2335 goto free_regions;
2336 break;
2337 case TYPE_AFVF:
2338 name = "rvu_afvf_mailbox";
2339 dir = MBOX_DIR_PFVF;
2340 dir_up = MBOX_DIR_PFVF_UP;
2341 reg_base = rvu->pfreg_base;
2342 err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
2343 if (err)
2344 goto free_regions;
2345 break;
2346 default:
2347 goto free_regions;
2348 }
2349
2350 mw->mbox_wq = alloc_workqueue(name,
2351 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2352 num);
2353 if (!mw->mbox_wq) {
2354 err = -ENOMEM;
2355 goto unmap_regions;
2356 }
2357
2358 mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
2359 sizeof(struct rvu_work), GFP_KERNEL);
2360 if (!mw->mbox_wrk) {
2361 err = -ENOMEM;
2362 goto exit;
2363 }
2364
2365 mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
2366 sizeof(struct rvu_work), GFP_KERNEL);
2367 if (!mw->mbox_wrk_up) {
2368 err = -ENOMEM;
2369 goto exit;
2370 }
2371
2372 err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
2373 reg_base, dir, num);
2374 if (err)
2375 goto exit;
2376
2377 err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
2378 reg_base, dir_up, num);
2379 if (err)
2380 goto exit;
2381
2382 for (i = 0; i < num; i++) {
2383 mwork = &mw->mbox_wrk[i];
2384 mwork->rvu = rvu;
2385 INIT_WORK(&mwork->work, mbox_handler);
2386
2387 mwork = &mw->mbox_wrk_up[i];
2388 mwork->rvu = rvu;
2389 INIT_WORK(&mwork->work, mbox_up_handler);
2390 }
2391 kfree(mbox_regions);
2392 return 0;
2393
2394 exit:
2395 destroy_workqueue(mw->mbox_wq);
2396 unmap_regions:
2397 while (num--)
2398 iounmap((void __iomem *)mbox_regions[num]);
2399 free_regions:
2400 kfree(mbox_regions);
2401 return err;
2402 }
2403
2404 static void rvu_mbox_destroy(struct mbox_wq_info *mw)
2405 {
2406 struct otx2_mbox *mbox = &mw->mbox;
2407 struct otx2_mbox_dev *mdev;
2408 int devid;
2409
2410 if (mw->mbox_wq) {
2411 destroy_workqueue(mw->mbox_wq);
2412 mw->mbox_wq = NULL;
2413 }
2414
2415 for (devid = 0; devid < mbox->ndevs; devid++) {
2416 mdev = &mbox->dev[devid];
2417 if (mdev->hwbase)
2418 iounmap((void __iomem *)mdev->hwbase);
2419 }
2420
2421 otx2_mbox_destroy(&mw->mbox);
2422 otx2_mbox_destroy(&mw->mbox_up);
2423 }
2424
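/* For each device whose bit is set in the IRQ status 'intr', latch the
 * number of pending request/up messages from the mbox header and queue
 * the corresponding work item.
 */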
2425 static void rvu_queue_work(struct mbox_wq_info *mw, int first,
2426 int mdevs, u64 intr)
2427 {
2428 struct otx2_mbox_dev *mdev;
2429 struct otx2_mbox *mbox;
2430 struct mbox_hdr *hdr;
2431 int i;
2432
2433 for (i = first; i < mdevs; i++) {
2434 /* Bit 0 of 'intr' corresponds to the device at index 'first' */
2435 if (!(intr & BIT_ULL(i - first)))
2436 continue;
2437
2438 mbox = &mw->mbox;
2439 mdev = &mbox->dev[i];
2440 hdr = mdev->mbase + mbox->rx_start;
2441
2442 /* The hdr->num_msgs is set to zero immediately in the interrupt
2443 * handler to ensure that it holds a correct value next time
2444 * the interrupt handler is called.
2445 * mw->mbox_wrk[].num_msgs holds the count for use in the mbox
2446 * handler and mw->mbox_wrk_up[].up_num_msgs holds the count for
2447 * use in the mbox up handler.
2448 */
2449
2450 if (hdr->num_msgs) {
2451 mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
2452 hdr->num_msgs = 0;
2453 queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
2454 }
2455 mbox = &mw->mbox_up;
2456 mdev = &mbox->dev[i];
2457 hdr = mdev->mbase + mbox->rx_start;
2458 if (hdr->num_msgs) {
2459 mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
2460 hdr->num_msgs = 0;
2461 queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
2462 }
2463 }
2464 }
2465
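/* AF mbox IRQ handler: ack the PF and VF mbox interrupts and hand the
 * actual message processing off to the workqueue via rvu_queue_work().
 */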
2466 static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
2467 {
2468 struct rvu *rvu = (struct rvu *)rvu_irq;
2469 int vfs = rvu->vfs;
2470 u64 intr;
2471
2472 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
2473 /* Clear interrupts */
2474 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
2475 if (intr)
2476 trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
2477
2478 /* Sync with mbox memory region */
2479 rmb();
2480
2481 rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
2482
2483 /* Handle VF interrupts */
2484 if (vfs > 64) {
2485 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
2486 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
2487
2488 rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
2489 vfs -= 64;
2490 }
2491
2492 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
2493 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
2494 if (intr)
2495 trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
2496
2497 rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
2498
2499 return IRQ_HANDLED;
2500 }
2501
2502 static void rvu_enable_mbox_intr(struct rvu *rvu)
2503 {
2504 struct rvu_hwinfo *hw = rvu->hw;
2505
2506 /* Clear spurious irqs, if any */
2507 rvu_write64(rvu, BLKADDR_RVUM,
2508 RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
2509
2510 /* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
2511 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
2512 INTR_MASK(hw->total_pfs) & ~1ULL);
2513 }
2514
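/* Tear down and reset every LF of the given block that is mapped to
 * 'pcifunc'; called from the FLR path to return LFs to a clean state.
 */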
2515 static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
2516 {
2517 struct rvu_block *block;
2518 int slot, lf, num_lfs;
2519 int err;
2520
2521 block = &rvu->hw->block[blkaddr];
2522 num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
2523 block->addr);
2524 if (!num_lfs)
2525 return;
2526 for (slot = 0; slot < num_lfs; slot++) {
2527 lf = rvu_get_lf(rvu, block, pcifunc, slot);
2528 if (lf < 0)
2529 continue;
2530
2531 /* Cleanup LF and reset it */
2532 if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
2533 rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
2534 else if (block->addr == BLKADDR_NPA)
2535 rvu_npa_lf_teardown(rvu, pcifunc, lf);
2536 else if ((block->addr == BLKADDR_CPT0) ||
2537 (block->addr == BLKADDR_CPT1))
2538 rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
2539 slot);
2540
2541 err = rvu_lf_reset(rvu, block, lf);
2542 if (err) {
2543 dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
2544 block->addr, lf);
2545 }
2546 }
2547 }
2548
2549 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
2550 {
2551 mutex_lock(&rvu->flr_lock);
2552 /* Reset order should reflect inter-block dependencies:
2553 * 1. Reset any packet/work sources (NIX, CPT, TIM)
2554 * 2. Flush and reset SSO/SSOW
2555 * 3. Cleanup pools (NPA)
2556 */
2557 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
2558 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
2559 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
2560 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
2561 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
2562 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
2563 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
2564 rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
2565 rvu_reset_lmt_map_tbl(rvu, pcifunc);
2566 rvu_detach_rsrcs(rvu, NULL, pcifunc);
2567 /* In scenarios where PF/VF drivers detach NIXLF without freeing MCAM
2568 * entries, check and free the MCAM entries explicitly to avoid leak.
2569 * Since LF is detached use LF number as -1.
2570 */
2571 rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
2572
2573 mutex_unlock(&rvu->flr_lock);
2574 }
2575
2576 static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
2577 {
2578 int reg = 0;
2579
2580 /* pcifunc = 0(PF0) | (vf + 1) */
2581 __rvu_flr_handler(rvu, vf + 1);
2582
2583 if (vf >= 64) {
2584 reg = 1;
2585 vf = vf - 64;
2586 }
2587
2588 /* Signal FLR finish and enable IRQ */
2589 rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
2590 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
2591 }
2592
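/* FLR work handler: work items beyond total_pfs belong to the AF's own
 * VFs; for a PF, run the FLR sequence for all of its VFs first and then
 * for the PF itself, before re-arming the FLR interrupt.
 */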
2593 static void rvu_flr_handler(struct work_struct *work)
2594 {
2595 struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
2596 struct rvu *rvu = flrwork->rvu;
2597 u16 pcifunc, numvfs, vf;
2598 u64 cfg;
2599 int pf;
2600
2601 pf = flrwork - rvu->flr_wrk;
2602 if (pf >= rvu->hw->total_pfs) {
2603 rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
2604 return;
2605 }
2606
2607 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2608 numvfs = (cfg >> 12) & 0xFF;
2609 pcifunc = pf << RVU_PFVF_PF_SHIFT;
2610
2611 for (vf = 0; vf < numvfs; vf++)
2612 __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
2613
2614 __rvu_flr_handler(rvu, pcifunc);
2615
2616 /* Signal FLR finish */
2617 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
2618
2619 /* Enable interrupt */
2620 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
2621 }
2622
2623 static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
2624 {
2625 int dev, vf, reg = 0;
2626 u64 intr;
2627
2628 if (start_vf >= 64)
2629 reg = 1;
2630
2631 intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
2632 if (!intr)
2633 return;
2634
2635 for (vf = 0; vf < numvfs; vf++) {
2636 if (!(intr & BIT_ULL(vf)))
2637 continue;
2638 /* Clear and disable the interrupt */
2639 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
2640 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
2641
2642 dev = vf + start_vf + rvu->hw->total_pfs;
2643 queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
2644 }
2645 }
2646
2647 static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
2648 {
2649 struct rvu *rvu = (struct rvu *)rvu_irq;
2650 u64 intr;
2651 u8 pf;
2652
2653 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
2654 if (!intr)
2655 goto afvf_flr;
2656
2657 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2658 if (intr & (1ULL << pf)) {
2659 /* clear interrupt */
2660 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
2661 BIT_ULL(pf));
2662 /* Disable the interrupt */
2663 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2664 BIT_ULL(pf));
2665 /* PF is already dead, do only AF related operations */
2666 queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
2667 }
2668 }
2669
2670 afvf_flr:
2671 rvu_afvf_queue_flr_work(rvu, 0, 64);
2672 if (rvu->vfs > 64)
2673 rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
2674
2675 return IRQ_HANDLED;
2676 }
2677
2678 static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
2679 {
2680 int vf;
2681
2682 /* Nothing to be done here other than clearing the
2683 * TRPEND bit.
2684 */
2685 for (vf = 0; vf < 64; vf++) {
2686 if (intr & (1ULL << vf)) {
2687 /* clear the trpend due to ME(master enable) */
2688 rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
2689 /* clear interrupt */
2690 rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
2691 }
2692 }
2693 }
2694
2695 /* Handles ME interrupts from VFs of AF */
2696 static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
2697 {
2698 struct rvu *rvu = (struct rvu *)rvu_irq;
2699 int vfset;
2700 u64 intr;
2701
2702 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2703
2704 for (vfset = 0; vfset <= 1; vfset++) {
2705 intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
2706 if (intr)
2707 rvu_me_handle_vfset(rvu, vfset, intr);
2708 }
2709
2710 return IRQ_HANDLED;
2711 }
2712
2713 /* Handles ME interrupts from PFs */
2714 static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
2715 {
2716 struct rvu *rvu = (struct rvu *)rvu_irq;
2717 u64 intr;
2718 u8 pf;
2719
2720 intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2721
2722 /* Nothing to be done here other than clearing the
2723 * TRPEND bit.
2724 */
2725 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2726 if (intr & (1ULL << pf)) {
2727 /* clear the trpend due to ME(master enable) */
2728 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
2729 BIT_ULL(pf));
2730 /* clear interrupt */
2731 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
2732 BIT_ULL(pf));
2733 }
2734 }
2735
2736 return IRQ_HANDLED;
2737 }
2738
2739 static void rvu_unregister_interrupts(struct rvu *rvu)
2740 {
2741 int irq;
2742
2743 rvu_cpt_unregister_interrupts(rvu);
2744
2745 /* Disable the Mbox interrupt */
2746 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
2747 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2748
2749 /* Disable the PF FLR interrupt */
2750 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2751 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2752
2753 /* Disable the PF ME interrupt */
2754 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
2755 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2756
2757 for (irq = 0; irq < rvu->num_vec; irq++) {
2758 if (rvu->irq_allocated[irq]) {
2759 free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
2760 rvu->irq_allocated[irq] = false;
2761 }
2762 }
2763
2764 pci_free_irq_vectors(rvu->pdev);
2765 rvu->num_vec = 0;
2766 }
2767
2768 static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
2769 {
2770 struct rvu_pfvf *pfvf = &rvu->pf[0];
2771 int offset;
2772
2774 offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2775
2776 /* Make sure there are enough MSIX vectors configured so that
2777 * VF interrupts can be handled. An offset of zero means that the
2778 * PF vectors are not configured and would overlap the AF vectors.
2779 */
2780 return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
2781 offset;
2782 }
2783
2784 static int rvu_register_interrupts(struct rvu *rvu)
2785 {
2786 int ret, offset, pf_vec_start;
2787
2788 rvu->num_vec = pci_msix_vec_count(rvu->pdev);
2789
2790 rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
2791 NAME_SIZE, GFP_KERNEL);
2792 if (!rvu->irq_name)
2793 return -ENOMEM;
2794
2795 rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
2796 sizeof(bool), GFP_KERNEL);
2797 if (!rvu->irq_allocated)
2798 return -ENOMEM;
2799
2800 /* Enable MSI-X */
2801 ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
2802 rvu->num_vec, PCI_IRQ_MSIX);
2803 if (ret < 0) {
2804 dev_err(rvu->dev,
2805 "RVUAF: Request for %d msix vectors failed, ret %d\n",
2806 rvu->num_vec, ret);
2807 return ret;
2808 }
2809
2810 /* Register mailbox interrupt handler */
2811 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
2812 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
2813 rvu_mbox_intr_handler, 0,
2814 &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
2815 if (ret) {
2816 dev_err(rvu->dev,
2817 "RVUAF: IRQ registration failed for mbox irq\n");
2818 goto fail;
2819 }
2820
2821 rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
2822
2823 /* Enable mailbox interrupts from all PFs */
2824 rvu_enable_mbox_intr(rvu);
2825
2826 /* Register FLR interrupt handler */
2827 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2828 "RVUAF FLR");
2829 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
2830 rvu_flr_intr_handler, 0,
2831 &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2832 rvu);
2833 if (ret) {
2834 dev_err(rvu->dev,
2835 "RVUAF: IRQ registration failed for FLR\n");
2836 goto fail;
2837 }
2838 rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
2839
2840 /* Clear pending FLR interrupts, then enable them for all PFs except the AF */
2841 rvu_write64(rvu, BLKADDR_RVUM,
2842 RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
2843
2844 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
2845 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2846
2847 /* Register ME interrupt handler */
2848 sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2849 "RVUAF ME");
2850 ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
2851 rvu_me_pf_intr_handler, 0,
2852 &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2853 rvu);
2854 if (ret) {
2855 dev_err(rvu->dev,
2856 "RVUAF: IRQ registration failed for ME\n");
2857 }
2858 rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
2859
2860 /* Clear TRPEND bit for all PFs */
2861 rvu_write64(rvu, BLKADDR_RVUM,
2862 RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
2863 /* Clear pending ME interrupts, then enable them for all PFs except the AF */
2864 rvu_write64(rvu, BLKADDR_RVUM,
2865 RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
2866
2867 rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
2868 INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2869
2870 if (!rvu_afvf_msix_vectors_num_ok(rvu))
2871 return 0;
2872
2873 /* Get PF MSIX vectors offset. */
2874 pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
2875 RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2876
2877 /* Register MBOX0 interrupt. */
2878 offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
2879 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
2880 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2881 rvu_mbox_intr_handler, 0,
2882 &rvu->irq_name[offset * NAME_SIZE],
2883 rvu);
2884 if (ret)
2885 dev_err(rvu->dev,
2886 "RVUAF: IRQ registration failed for Mbox0\n");
2887
2888 rvu->irq_allocated[offset] = true;
2889
2890 /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
2891 * simply increment current offset by 1.
2892 */
2893 offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
2894 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
2895 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2896 rvu_mbox_intr_handler, 0,
2897 &rvu->irq_name[offset * NAME_SIZE],
2898 rvu);
2899 if (ret)
2900 dev_err(rvu->dev,
2901 "RVUAF: IRQ registration failed for Mbox1\n");
2902
2903 rvu->irq_allocated[offset] = true;
2904
2905 /* Register FLR interrupt handler for AF's VFs */
2906 offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
2907 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
2908 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2909 rvu_flr_intr_handler, 0,
2910 &rvu->irq_name[offset * NAME_SIZE], rvu);
2911 if (ret) {
2912 dev_err(rvu->dev,
2913 "RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
2914 goto fail;
2915 }
2916 rvu->irq_allocated[offset] = true;
2917
2918 offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
2919 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
2920 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2921 rvu_flr_intr_handler, 0,
2922 &rvu->irq_name[offset * NAME_SIZE], rvu);
2923 if (ret) {
2924 dev_err(rvu->dev,
2925 "RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
2926 goto fail;
2927 }
2928 rvu->irq_allocated[offset] = true;
2929
2930 /* Register ME interrupt handler for AF's VFs */
2931 offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
2932 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
2933 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2934 rvu_me_vf_intr_handler, 0,
2935 &rvu->irq_name[offset * NAME_SIZE], rvu);
2936 if (ret) {
2937 dev_err(rvu->dev,
2938 "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
2939 goto fail;
2940 }
2941 rvu->irq_allocated[offset] = true;
2942
2943 offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
2944 sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
2945 ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2946 rvu_me_vf_intr_handler, 0,
2947 &rvu->irq_name[offset * NAME_SIZE], rvu);
2948 if (ret) {
2949 dev_err(rvu->dev,
2950 "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
2951 goto fail;
2952 }
2953 rvu->irq_allocated[offset] = true;
2954
2955 ret = rvu_cpt_register_interrupts(rvu);
2956 if (ret)
2957 goto fail;
2958
2959 return 0;
2960
2961 fail:
2962 rvu_unregister_interrupts(rvu);
2963 return ret;
2964 }
2965
2966 static void rvu_flr_wq_destroy(struct rvu *rvu)
2967 {
2968 if (rvu->flr_wq) {
2969 destroy_workqueue(rvu->flr_wq);
2970 rvu->flr_wq = NULL;
2971 }
2972 }
2973
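/* Enable FLR trapping for all PFs (bit 22 of RVU_PRIV_PFX_CFG, per the
 * write below) and allocate the workqueue plus one work item per PF and
 * per AF VF that rvu_flr_handler() runs from.
 */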
2974 static int rvu_flr_init(struct rvu *rvu)
2975 {
2976 int dev, num_devs;
2977 u64 cfg;
2978 int pf;
2979
2980 /* Enable FLR for all PFs */
2981 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2982 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2983 rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
2984 cfg | BIT_ULL(22));
2985 }
2986
2987 rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
2988 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2989 1);
2990 if (!rvu->flr_wq)
2991 return -ENOMEM;
2992
2993 num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
2994 rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
2995 sizeof(struct rvu_work), GFP_KERNEL);
2996 if (!rvu->flr_wrk) {
2997 destroy_workqueue(rvu->flr_wq);
2998 return -ENOMEM;
2999 }
3000
3001 for (dev = 0; dev < num_devs; dev++) {
3002 rvu->flr_wrk[dev].rvu = rvu;
3003 INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
3004 }
3005
3006 mutex_init(&rvu->flr_lock);
3007
3008 return 0;
3009 }
3010
3011 static void rvu_disable_afvf_intr(struct rvu *rvu)
3012 {
3013 int vfs = rvu->vfs;
3014
3015 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
3016 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
3017 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
3018 if (vfs <= 64)
3019 return;
3020
3021 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
3022 INTR_MASK(vfs - 64));
3023 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
3024 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
3025 }
3026
3027 static void rvu_enable_afvf_intr(struct rvu *rvu)
3028 {
3029 int vfs = rvu->vfs;
3030
3031 /* Clear any pending interrupts and enable AF VF interrupts for
3032 * the first 64 VFs.
3033 */
3034 /* Mbox */
3035 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
3036 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
3037
3038 /* FLR */
3039 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
3040 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
3041 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
3042
3043 /* Same for remaining VFs, if any. */
3044 if (vfs <= 64)
3045 return;
3046
3047 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
3048 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
3049 INTR_MASK(vfs - 64));
3050
3051 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
3052 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
3053 rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
3054 }
3055
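/* Look up the LBK device and read its CONST register to learn how many
 * loopback channels the silicon provides; used to cap the AF's VF count.
 */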
3056 int rvu_get_num_lbk_chans(void)
3057 {
3058 struct pci_dev *pdev;
3059 void __iomem *base;
3060 int ret = -EIO;
3061
3062 pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
3063 NULL);
3064 if (!pdev)
3065 goto err;
3066
3067 base = pci_ioremap_bar(pdev, 0);
3068 if (!base)
3069 goto err_put;
3070
3071 /* Read number of available LBK channels from LBK(0)_CONST register. */
3072 ret = (readq(base + 0x10) >> 32) & 0xffff;
3073 iounmap(base);
3074 err_put:
3075 pci_dev_put(pdev);
3076 err:
3077 return ret;
3078 }
3079
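/* Enable the AF's own SRIOV VFs: the VF count is limited by the available
 * MSI-X vectors and LBK channels, and the AF<->VF mailbox and interrupts
 * are set up before pci_enable_sriov() so early VF mbox traffic is not lost.
 */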
3080 static int rvu_enable_sriov(struct rvu *rvu)
3081 {
3082 struct pci_dev *pdev = rvu->pdev;
3083 int err, chans, vfs;
3084
3085 if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
3086 dev_warn(&pdev->dev,
3087 "Skipping SRIOV enablement since not enough IRQs are available\n");
3088 return 0;
3089 }
3090
3091 chans = rvu_get_num_lbk_chans();
3092 if (chans < 0)
3093 return chans;
3094
3095 vfs = pci_sriov_get_totalvfs(pdev);
3096
3097 /* Limit VFs in case we have more VFs than LBK channels available. */
3098 if (vfs > chans)
3099 vfs = chans;
3100
3101 if (!vfs)
3102 return 0;
3103
3104 /* LBK channel number 63 is used for switching packets between
3105 * CGX mapped VFs. Hence limit LBK pairs to 62 only.
3106 */
3107 if (vfs > 62)
3108 vfs = 62;
3109
3110 /* Save the number of VFs for reference in the VF interrupt handlers.
3111 * Since interrupts might start arriving during SRIOV enablement,
3112 * the ordinary API cannot be used to get the number of enabled VFs.
3113 */
3114 rvu->vfs = vfs;
3115
3116 err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
3117 rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
3118 if (err)
3119 return err;
3120
3121 rvu_enable_afvf_intr(rvu);
3122 /* Make sure IRQs are enabled before SRIOV. */
3123 mb();
3124
3125 err = pci_enable_sriov(pdev, vfs);
3126 if (err) {
3127 rvu_disable_afvf_intr(rvu);
3128 rvu_mbox_destroy(&rvu->afvf_wq_info);
3129 return err;
3130 }
3131
3132 return 0;
3133 }
3134
3135 static void rvu_disable_sriov(struct rvu *rvu)
3136 {
3137 rvu_disable_afvf_intr(rvu);
3138 rvu_mbox_destroy(&rvu->afvf_wq_info);
3139 pci_disable_sriov(rvu->pdev);
3140 }
3141
3142 static void rvu_update_module_params(struct rvu *rvu)
3143 {
3144 const char *default_pfl_name = "default";
3145
3146 strscpy(rvu->mkex_pfl_name,
3147 mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
3148 strscpy(rvu->kpu_pfl_name,
3149 kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
3150 }
3151
3152 static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3153 {
3154 struct device *dev = &pdev->dev;
3155 struct rvu *rvu;
3156 int err;
3157
3158 rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
3159 if (!rvu)
3160 return -ENOMEM;
3161
3162 rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
3163 if (!rvu->hw) {
3164 devm_kfree(dev, rvu);
3165 return -ENOMEM;
3166 }
3167
3168 pci_set_drvdata(pdev, rvu);
3169 rvu->pdev = pdev;
3170 rvu->dev = &pdev->dev;
3171
3172 err = pci_enable_device(pdev);
3173 if (err) {
3174 dev_err(dev, "Failed to enable PCI device\n");
3175 goto err_freemem;
3176 }
3177
3178 err = pci_request_regions(pdev, DRV_NAME);
3179 if (err) {
3180 dev_err(dev, "PCI request regions failed 0x%x\n", err);
3181 goto err_disable_device;
3182 }
3183
3184 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
3185 if (err) {
3186 dev_err(dev, "DMA mask config failed, abort\n");
3187 goto err_release_regions;
3188 }
3189
3190 pci_set_master(pdev);
3191
3192 rvu->ptp = ptp_get();
3193 if (IS_ERR(rvu->ptp)) {
3194 err = PTR_ERR(rvu->ptp);
3195 if (err == -EPROBE_DEFER)
3196 goto err_release_regions;
3197 rvu->ptp = NULL;
3198 }
3199
3200 /* Map Admin function CSRs */
3201 rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
3202 rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
3203 if (!rvu->afreg_base || !rvu->pfreg_base) {
3204 dev_err(dev, "Unable to map admin function CSRs, aborting\n");
3205 err = -ENOMEM;
3206 goto err_put_ptp;
3207 }
3208
3209 /* Store module params in rvu structure */
3210 rvu_update_module_params(rvu);
3211
3212 /* Check which blocks the HW supports */
3213 rvu_check_block_implemented(rvu);
3214
3215 rvu_reset_all_blocks(rvu);
3216
3217 rvu_setup_hw_capabilities(rvu);
3218
3219 err = rvu_setup_hw_resources(rvu);
3220 if (err)
3221 goto err_put_ptp;
3222
3223 /* Init mailbox btw AF and PFs */
3224 err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
3225 rvu->hw->total_pfs, rvu_afpf_mbox_handler,
3226 rvu_afpf_mbox_up_handler);
3227 if (err) {
3228 dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
3229 goto err_hwsetup;
3230 }
3231
3232 err = rvu_flr_init(rvu);
3233 if (err) {
3234 dev_err(dev, "%s: Failed to initialize flr\n", __func__);
3235 goto err_mbox;
3236 }
3237
3238 err = rvu_register_interrupts(rvu);
3239 if (err) {
3240 dev_err(dev, "%s: Failed to register interrupts\n", __func__);
3241 goto err_flr;
3242 }
3243
3244 err = rvu_register_dl(rvu);
3245 if (err) {
3246 dev_err(dev, "%s: Failed to register devlink\n", __func__);
3247 goto err_irq;
3248 }
3249
3250 rvu_setup_rvum_blk_revid(rvu);
3251
3252 /* Enable AF's VFs (if any) */
3253 err = rvu_enable_sriov(rvu);
3254 if (err) {
3255 dev_err(dev, "%s: Failed to enable sriov\n", __func__);
3256 goto err_dl;
3257 }
3258
3259 /* Initialize debugfs */
3260 rvu_dbg_init(rvu);
3261
3262 mutex_init(&rvu->rswitch.switch_lock);
3263
3264 if (rvu->fwdata)
3265 ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
3266 rvu->fwdata->ptp_ext_tstamp);
3267
3268 return 0;
3269 err_dl:
3270 rvu_unregister_dl(rvu);
3271 err_irq:
3272 rvu_unregister_interrupts(rvu);
3273 err_flr:
3274 rvu_flr_wq_destroy(rvu);
3275 err_mbox:
3276 rvu_mbox_destroy(&rvu->afpf_wq_info);
3277 err_hwsetup:
3278 rvu_cgx_exit(rvu);
3279 rvu_fwdata_exit(rvu);
3280 rvu_reset_all_blocks(rvu);
3281 rvu_free_hw_resources(rvu);
3282 rvu_clear_rvum_blk_revid(rvu);
3283 err_put_ptp:
3284 ptp_put(rvu->ptp);
3285 err_release_regions:
3286 pci_release_regions(pdev);
3287 err_disable_device:
3288 pci_disable_device(pdev);
3289 err_freemem:
3290 pci_set_drvdata(pdev, NULL);
3291 devm_kfree(&pdev->dev, rvu->hw);
3292 devm_kfree(dev, rvu);
3293 return err;
3294 }
3295
3296 static void rvu_remove(struct pci_dev *pdev)
3297 {
3298 struct rvu *rvu = pci_get_drvdata(pdev);
3299
3300 rvu_dbg_exit(rvu);
3301 rvu_unregister_dl(rvu);
3302 rvu_unregister_interrupts(rvu);
3303 rvu_flr_wq_destroy(rvu);
3304 rvu_cgx_exit(rvu);
3305 rvu_fwdata_exit(rvu);
3306 rvu_mbox_destroy(&rvu->afpf_wq_info);
3307 rvu_disable_sriov(rvu);
3308 rvu_reset_all_blocks(rvu);
3309 rvu_free_hw_resources(rvu);
3310 rvu_clear_rvum_blk_revid(rvu);
3311 ptp_put(rvu->ptp);
3312 pci_release_regions(pdev);
3313 pci_disable_device(pdev);
3314 pci_set_drvdata(pdev, NULL);
3315
3316 devm_kfree(&pdev->dev, rvu->hw);
3317 devm_kfree(&pdev->dev, rvu);
3318 }
3319
3320 static struct pci_driver rvu_driver = {
3321 .name = DRV_NAME,
3322 .id_table = rvu_id_table,
3323 .probe = rvu_probe,
3324 .remove = rvu_remove,
3325 };
3326
3327 static int __init rvu_init_module(void)
3328 {
3329 int err;
3330
3331 pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
3332
3333 err = pci_register_driver(&cgx_driver);
3334 if (err < 0)
3335 return err;
3336
3337 err = pci_register_driver(&ptp_driver);
3338 if (err < 0)
3339 goto ptp_err;
3340
3341 err = pci_register_driver(&rvu_driver);
3342 if (err < 0)
3343 goto rvu_err;
3344
3345 return 0;
3346 rvu_err:
3347 pci_unregister_driver(&ptp_driver);
3348 ptp_err:
3349 pci_unregister_driver(&cgx_driver);
3350
3351 return err;
3352 }
3353
3354 static void __exit rvu_cleanup_module(void)
3355 {
3356 pci_unregister_driver(&rvu_driver);
3357 pci_unregister_driver(&ptp_driver);
3358 pci_unregister_driver(&cgx_driver);
3359 }
3360
3361 module_init(rvu_init_module);
3362 module_exit(rvu_cleanup_module);
3363