// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M

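/* Check whether the LMAC mapped to a PF supports the given MAC feature */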
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	return (cgx_features_get(cgxd) & feature);
}

/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

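/* Returns the PF id mapped to a CGX LMAC, or -ENODEV if none is mapped */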
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one pf mapped to a cgx lmac port */
	if (!pfmap)
		return -ENODEV;
	else
		return find_first_bit(&pfmap, 16);
}

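/* Pack a CGX id (upper nibble) and LMAC id (lower nibble) into a single byte */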
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

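/* Return the cgx device handle for a given CGX id, or NULL if out of range */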
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

/* Returns the first enabled CGX instance; if none are enabled, returns NULL */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
	int first_enabled_cgx = 0;
	void *cgxd = NULL;

	for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
		cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
		if (cgxd)
			break;
	}

	return cgxd;
}

/* Based on P2X connectivity find mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
				  int cgx_id, int lmac_id)
{
	struct rvu_pfvf *pfvf = &rvu->pf[pf];
	u8 p2x;

	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
	pfvf->nix_blkaddr = BLKADDR_NIX0;
	if (p2x == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
}

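/* Build the PF to CGX LMAC map (and its reverse), allocate a packet kind
 * (pkind) for each mapped LMAC and note which NIX block each PF uses.
 */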
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int pf = PF_CGXMAP_BASE;
	unsigned long lmac_bmap;
	int size, free_pkind;
	int cgx, lmac, iter;
	int numvfs, hwvfs;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
				  GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
					      iter);
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
			rvu->cgx_mapped_pfs++;
			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
			rvu->cgx_mapped_vfs += numvfs;
			pf++;
		}
	}
	return 0;
}

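/* Read the current link status of a CGX LMAC, queue it as an event and
 * schedule the event worker so the mapped PF gets notified.
 */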
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

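/* Forward a link change event to the PF mapped to this LMAC via the mailbox,
 * provided the PF has enabled link event notifications.
 */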
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int err, pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

	do {
		pfid = find_first_bit(&pfmap, 16);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg)
			continue;
		msg->link_info = *linfo;
		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
		if (err)
			dev_warn(rvu->dev, "notification to pf %d failed\n",
				 pfid);
	} while (pfmap);
}

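/* Worker that drains the CGX event queue and notifies the mapped PFs */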
static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

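/* Set up the CGX event queue and workqueue and register a link change
 * callback with every LMAC.
 */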
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

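/* Discover CGX devices, map their LMACs to RVU PFs, register for link
 * events and kick off link up on all ports.
 */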
int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port ids start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return -ENODEV;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed, before
	 * we turn on the links
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

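/* Unregister the per-LMAC event handlers and destroy the event workqueue */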
int rvu_cgx_exit(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only;
 * VFs of the mapped PF and other PFs are not allowed. This fn() checks
 * whether a PFFUNC is permitted to do the config or not.
 */
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}

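/* Enable or disable forwarding of received pause frames (backpressure) to
 * NIX for the LMAC mapped to a PF.
 */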
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	if (enable)
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
	else
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

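/* Start or stop packet reception and transmission on the LMAC mapped to the
 * given pcifunc.
 */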
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
}

int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
{
	struct mac_ops *mac_ops;

	mac_ops = get_mac_ops(cgxd);
	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}

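/* Remove all DMAC filter entries installed for the LMAC mapped to a PF */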
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	int i = 0, lmac_count = 0;
	u8 max_dmac_filters;
	u8 cgx_id, lmac_id;
	void *cgx_dev;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgx_dev = cgx_get_pdata(cgx_id);
	lmac_count = cgx_get_lmac_cnt(cgx_dev);
	max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;

	for (i = 0; i < max_dmac_filters; i++)
		cgx_lmac_addr_del(cgx_id, lmac_id, i);

	/* cgx_lmac_addr_del() does not clear the entry at index 0,
	 * so it needs to be reset explicitly.
	 */
	cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

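/* Fill the CGX or RPM stats response for the LMAC mapped to the requesting
 * PF; the rsp layout is chosen based on the MAC's rx/tx stats counts.
 */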
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
			      void *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	/* Rx stats */
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
			       struct rpm_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	return cgx_get_fec_stats(cgxd, lmac, rsp);
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
				      struct cgx_mac_addr_add_req *req,
				      struct cgx_mac_addr_add_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
	if (rc >= 0) {
		rsp->index = rc;
		return 0;
	}

	return rc;
}

int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
				      struct cgx_mac_addr_del_req *req,
				      struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}

int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
					     struct msg_req *req,
					     struct cgx_max_dmac_entries_get_rsp
					     *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	/* If the msg is received from a PF that is not mapped to a CGX LMAC,
	 * or from a VF, then no DMAC filter entries are allocated at the CGX
	 * level, so return zero.
	 */
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
		rsp->max_dmac_filters = 0;
		return 0;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0, i;
	u64 cfg;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* copy 48 bit mac address to rsp->mac_addr */
	for (i = 0; i < ETH_ALEN; i++)
		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

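/* Configure hardware RX timestamping for the LMAC mapped to a PF and tell
 * NPC whether packets from this PF carry the 8-byte timestamp header.
 */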
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	/* This msg is expected only from PFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, true);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes
	 * and if PTP is disabled then no shift is required
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;
	/* This flag is required to clean up CGX conf if app gets killed */
	pfvf->hw_rx_tstamp_en = enable;

	return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
		return -EPERM;

	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

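/* Enable or disable link change notifications for a PF; on enable the
 * current link status is sent to the PF immediately.
 */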
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
				      struct msg_req *req,
				      struct cgx_features_info_msg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->lmac_features = cgx_features_get(cgxd);

	return 0;
}

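/* Return the MAC (CGX/RPM) FIFO length, or 0 if no CGX device is enabled */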
u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	u32 fifo_len;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	fifo_len = mac_ops ? mac_ops->fifo_len : 0;

	return fifo_len;
}

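/* Enable or disable internal loopback on the LMAC mapped to a PF */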
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

	return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

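/* Configure 802.3X pause frames for the LMAC mapped to a PF. The request is
 * rejected if PFC is already enabled on the LMAC.
 */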
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
{
	int pf = rvu_get_pf(pcifunc);
	u8 rx_pfc = 0, tx_pfc = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
	if (tx_pfc || rx_pfc) {
		dev_warn(rvu->dev,
			 "Can not configure 802.3X flow control as PFC frames are enabled");
		return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
}

int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	int err = 0;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (req->set)
		err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
	else
		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);

	return err;
}

int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}

/* Finds the cumulative value of NIX rx/tx counters of a PF's LF and those
 * of its VFs as well, i.e. the NIX rx/tx counters at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes LF of a PF and all of its VF belongs to the same
	 * NIX block
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if a lf is attached to this PF or one of its VFs */
		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
			    ~RVU_PFVF_FUNC_MASK)))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

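/* Reference-count NIXLF start/stop per PF: enable CGX RX/TX when the first
 * NIXLF is started and disable it when the last one is stopped.
 */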
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when first of all NIXLFs is started.
	 * Stop CGX when last of all NIXLFs is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}

int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
				       struct fec_mode *req,
				       struct fec_mode *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	if (req->fec == OTX2_FEC_OFF)
		req->fec = OTX2_FEC_NONE;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
					   struct cgx_fw_data *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!rvu->fwdata)
		return -ENXIO;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
	       sizeof(struct cgx_lmac_fwdata_s));
	return 0;
}

int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
				       struct cgx_set_link_mode_req *req,
				       struct cgx_set_link_mode_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
					 struct cgx_mac_addr_update_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}

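/* Configure priority flow control (PFC) for the LMAC mapped to a PF. The
 * request is rejected if 802.3X pause frames are already enabled.
 */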
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
			       u8 rx_pause, u16 pfc_en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 rx_8023 = 0, tx_8023 = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
	if (tx_8023 || rx_8023) {
		dev_warn(rvu->dev,
			 "Can not configure PFC as 802.3X pause frames are enabled");
		return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
}

int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
					    struct cgx_pfc_cfg *req,
					    struct cgx_pfc_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;
	int err;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
					 req->rx_pause, req->pfc_en);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
	return err;
}