1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 CGX driver
3  *
4  * Copyright (C) 2018 Marvell.
5  *
6  */
7 
8 #include <linux/acpi.h>
9 #include <linux/module.h>
10 #include <linux/interrupt.h>
11 #include <linux/pci.h>
12 #include <linux/netdevice.h>
13 #include <linux/etherdevice.h>
14 #include <linux/ethtool.h>
15 #include <linux/phy.h>
16 #include <linux/of.h>
17 #include <linux/of_mdio.h>
18 #include <linux/of_net.h>
19 
20 #include "cgx.h"
21 #include "rvu.h"
22 #include "lmac_common.h"
23 
24 #define DRV_NAME	"Marvell-CGX/RPM"
25 #define DRV_STRING      "Marvell CGX/RPM Driver"
26 
27 static LIST_HEAD(cgx_list);
28 
/* Convert firmware speed encoding to user format (Mbps) */
30 static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
31 	[CGX_LINK_NONE] = 0,
32 	[CGX_LINK_10M] = 10,
33 	[CGX_LINK_100M] = 100,
34 	[CGX_LINK_1G] = 1000,
35 	[CGX_LINK_2HG] = 2500,
36 	[CGX_LINK_5G] = 5000,
37 	[CGX_LINK_10G] = 10000,
38 	[CGX_LINK_20G] = 20000,
39 	[CGX_LINK_25G] = 25000,
40 	[CGX_LINK_40G] = 40000,
41 	[CGX_LINK_50G] = 50000,
42 	[CGX_LINK_80G] = 80000,
43 	[CGX_LINK_100G] = 100000,
44 };
45 
46 /* Convert firmware lmac type encoding to string */
47 static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
48 	[LMAC_MODE_SGMII] = "SGMII",
49 	[LMAC_MODE_XAUI] = "XAUI",
50 	[LMAC_MODE_RXAUI] = "RXAUI",
51 	[LMAC_MODE_10G_R] = "10G_R",
52 	[LMAC_MODE_40G_R] = "40G_R",
53 	[LMAC_MODE_QSGMII] = "QSGMII",
54 	[LMAC_MODE_25G_R] = "25G_R",
55 	[LMAC_MODE_50G_R] = "50G_R",
56 	[LMAC_MODE_100G_R] = "100G_R",
57 	[LMAC_MODE_USXGMII] = "USXGMII",
58 };
59 
60 /* CGX PHY management internal APIs */
61 static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
62 
63 /* Supported devices */
64 static const struct pci_device_id cgx_id_table[] = {
65 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
66 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
67 	{ 0, }  /* end of table */
68 };
69 
70 MODULE_DEVICE_TABLE(pci, cgx_id_table);
71 
static bool is_dev_rpm(void *cgxd)
73 {
74 	struct cgx *cgx = cgxd;
75 
76 	return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
77 }
78 
bool is_lmac_valid(struct cgx *cgx, int lmac_id)
80 {
81 	if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
82 		return false;
83 	return test_bit(lmac_id, &cgx->lmac_bmap);
84 }
85 
/* Helper function to get the sequential index of an enabled
 * LMAC within a CGX
 */
static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
90 {
91 	int tmp, id = 0;
92 
93 	for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
94 		if (tmp == lmac_id)
95 			break;
96 		id++;
97 	}
98 
99 	return id;
100 }
101 
struct mac_ops *get_mac_ops(void *cgxd)
103 {
104 	if (!cgxd)
105 		return cgxd;
106 
107 	return ((struct cgx *)cgxd)->mac_ops;
108 }
109 
void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
111 {
112 	writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
113 	       offset);
114 }
115 
u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
117 {
118 	return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
119 		     offset);
120 }
121 
struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
123 {
124 	if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
125 		return NULL;
126 
127 	return cgx->lmac_idmap[lmac_id];
128 }
129 
int cgx_get_cgxcnt_max(void)
131 {
132 	struct cgx *cgx_dev;
133 	int idmax = -ENODEV;
134 
135 	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
136 		if (cgx_dev->cgx_id > idmax)
137 			idmax = cgx_dev->cgx_id;
138 
139 	if (idmax < 0)
140 		return 0;
141 
142 	return idmax + 1;
143 }
144 
int cgx_get_lmac_cnt(void *cgxd)
146 {
147 	struct cgx *cgx = cgxd;
148 
149 	if (!cgx)
150 		return -ENODEV;
151 
152 	return cgx->lmac_count;
153 }
154 
void *cgx_get_pdata(int cgx_id)
156 {
157 	struct cgx *cgx_dev;
158 
159 	list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
160 		if (cgx_dev->cgx_id == cgx_id)
161 			return cgx_dev;
162 	}
163 	return NULL;
164 }
165 
void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
167 {
168 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
169 
170 	cgx_write(cgx_dev, lmac_id, offset, val);
171 }
172 
u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
174 {
175 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
176 
177 	return cgx_read(cgx_dev, lmac_id, offset);
178 }
179 
int cgx_get_cgxid(void *cgxd)
181 {
182 	struct cgx *cgx = cgxd;
183 
184 	if (!cgx)
185 		return -EINVAL;
186 
187 	return cgx->cgx_id;
188 }
189 
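/* Get the P2X (NIX interface) this LMAC is mapped to, as programmed in
 * CGXX_CMRX_CFG.
 */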
u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
191 {
192 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
193 	u64 cfg;
194 
195 	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);
196 
197 	return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
198 }
199 
/* Ensure the required lock for the event queue (where asynchronous events are
 * posted) is acquired before calling this API. Otherwise an asynchronous event
 * (with the latest link status) can reach the destination before this function
 * returns and could make the link status appear wrong.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
		      struct cgx_link_user_info *linfo)
207 {
208 	struct lmac *lmac = lmac_pdata(lmac_id, cgxd);
209 
210 	if (!lmac)
211 		return -ENODEV;
212 
213 	*linfo = lmac->link_info;
214 	return 0;
215 }
216 
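/* Pack a 6-byte MAC address into the u64 layout used by the DMAC CAM
 * registers (first octet in the most significant byte of the 48-bit field).
 */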
static u64 mac2u64(u8 *mac_addr)
218 {
219 	u64 mac = 0;
220 	int index;
221 
222 	for (index = ETH_ALEN - 1; index >= 0; index--)
223 		mac |= ((u64)*mac_addr++) << (8 * index);
224 	return mac;
225 }
226 
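/* Unpack a DMAC CAM register value back into a 6-byte MAC address */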
static void cfg2mac(u64 cfg, u8 *mac_addr)
228 {
229 	int i, index = 0;
230 
231 	for (i = ETH_ALEN - 1; i >= 0; i--, index++)
232 		mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
233 }
234 
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
236 {
237 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
238 	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
239 	struct mac_ops *mac_ops;
240 	int index, id;
241 	u64 cfg;
242 
243 	/* access mac_ops to know csr_offset */
244 	mac_ops = cgx_dev->mac_ops;
245 
	/* Copy 6 bytes of MAC address into the DMAC CAM config value */
	cfg = mac2u64(mac_addr);
250 
251 	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
252 
253 	index = id * lmac->mac_to_index_bmap.max;
254 
255 	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
256 		  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
257 
258 	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
259 	cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
260 		CGX_DMAC_MCAST_MODE);
261 	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
262 
263 	return 0;
264 }
265 
u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
267 {
268 	struct mac_ops *mac_ops;
269 	struct cgx *cgx = cgxd;
270 
271 	if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
272 		return 0;
273 
	/* Get mac_ops to know csr offset */
	mac_ops = cgx->mac_ops;
277 
278 	return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
279 }
280 
u64 cgx_read_dmac_entry(void *cgxd, int index)
282 {
283 	struct mac_ops *mac_ops;
284 	struct cgx *cgx;
285 
286 	if (!cgxd)
287 		return 0;
288 
289 	cgx = cgxd;
290 	mac_ops = cgx->mac_ops;
291 	return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
292 }
293 
int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
295 {
296 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
297 	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
298 	struct mac_ops *mac_ops;
299 	int index, idx;
300 	u64 cfg = 0;
301 	int id;
302 
303 	if (!lmac)
304 		return -ENODEV;
305 
306 	mac_ops = cgx_dev->mac_ops;
307 	/* Get available index where entry is to be installed */
308 	idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
309 	if (idx < 0)
310 		return idx;
311 
312 	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
313 
314 	index = id * lmac->mac_to_index_bmap.max + idx;
315 
	cfg = mac2u64(mac_addr);
317 	cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
318 	cfg |= ((u64)lmac_id << 49);
319 	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
320 
321 	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
322 	cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);
323 
324 	if (is_multicast_ether_addr(mac_addr)) {
325 		cfg &= ~GENMASK_ULL(2, 1);
326 		cfg |= CGX_DMAC_MCAST_MODE_CAM;
327 		lmac->mcast_filters_count++;
328 	} else if (!lmac->mcast_filters_count) {
329 		cfg |= CGX_DMAC_MCAST_MODE;
330 	}
331 
332 	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
333 
334 	return idx;
335 }
336 
int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
338 {
339 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
340 	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
341 	struct mac_ops *mac_ops;
342 	u8 index = 0, id;
343 	u64 cfg;
344 
345 	if (!lmac)
346 		return -ENODEV;
347 
348 	mac_ops = cgx_dev->mac_ops;
349 	/* Restore index 0 to its default init value as done during
350 	 * cgx_lmac_init
351 	 */
352 	set_bit(0, lmac->mac_to_index_bmap.bmap);
353 
354 	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
355 
356 	index = id * lmac->mac_to_index_bmap.max + index;
357 	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
358 
359 	/* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
360 	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
361 	cfg &= ~CGX_DMAC_CAM_ACCEPT;
362 	cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
363 	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
364 
365 	return 0;
366 }
367 
/* Allows caller to change the MAC address associated with an index
 * in the DMAC filter table, including index 0 which is reserved for
 * the interface MAC address
 */
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
373 {
374 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
375 	struct mac_ops *mac_ops;
376 	struct lmac *lmac;
377 	u64 cfg;
378 	int id;
379 
380 	lmac = lmac_pdata(lmac_id, cgx_dev);
381 	if (!lmac)
382 		return -ENODEV;
383 
384 	mac_ops = cgx_dev->mac_ops;
385 	/* Validate the index */
386 	if (index >= lmac->mac_to_index_bmap.max)
387 		return -EINVAL;
388 
389 	/* ensure index is already set */
390 	if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
391 		return -EINVAL;
392 
393 	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
394 
395 	index = id * lmac->mac_to_index_bmap.max + index;
396 
397 	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
398 	cfg &= ~CGX_RX_DMAC_ADR_MASK;
	cfg |= mac2u64(mac_addr);
400 
401 	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
402 	return 0;
403 }
404 
int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
406 {
407 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
408 	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
409 	struct mac_ops *mac_ops;
410 	u8 mac[ETH_ALEN];
411 	u64 cfg;
412 	int id;
413 
414 	if (!lmac)
415 		return -ENODEV;
416 
417 	mac_ops = cgx_dev->mac_ops;
418 	/* Validate the index */
419 	if (index >= lmac->mac_to_index_bmap.max)
420 		return -EINVAL;
421 
422 	/* Skip deletion for reserved index i.e. index 0 */
423 	if (index == 0)
424 		return 0;
425 
426 	rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
427 
428 	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
429 
430 	index = id * lmac->mac_to_index_bmap.max + index;
431 
432 	/* Read MAC address to check whether it is ucast or mcast */
433 	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
434 
435 	cfg2mac(cfg, mac);
436 	if (is_multicast_ether_addr(mac))
437 		lmac->mcast_filters_count--;
438 
439 	if (!lmac->mcast_filters_count) {
440 		cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
441 		cfg &= ~GENMASK_ULL(2, 1);
442 		cfg |= CGX_DMAC_MCAST_MODE;
443 		cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
444 	}
445 
446 	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
447 
448 	return 0;
449 }
450 
int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
452 {
453 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
454 	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
455 
456 	if (lmac)
457 		return lmac->mac_to_index_bmap.max;
458 
459 	return 0;
460 }
461 
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
463 {
464 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
465 	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
466 	struct mac_ops *mac_ops;
467 	int index;
468 	u64 cfg;
469 	int id;
470 
471 	mac_ops = cgx_dev->mac_ops;
472 
473 	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
474 
475 	index = id * lmac->mac_to_index_bmap.max;
476 
477 	cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
478 	return cfg & CGX_RX_DMAC_ADR_MASK;
479 }
480 
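/* Configure the NPC port kind (pkind) used for packets received
 * on this LMAC.
 */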
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
482 {
483 	struct cgx *cgx = cgxd;
484 
485 	if (!is_lmac_valid(cgx, lmac_id))
486 		return -ENODEV;
487 
488 	cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
489 	return 0;
490 }
491 
static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
493 {
494 	struct cgx *cgx = cgxd;
495 	u64 cfg;
496 
497 	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
498 	return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
499 }
500 
501 /* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
503 {
504 	struct cgx *cgx = cgxd;
505 	u8 lmac_type;
506 	u64 cfg;
507 
508 	if (!is_lmac_valid(cgx, lmac_id))
509 		return -ENODEV;
510 
511 	lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id);
512 	if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
513 		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
514 		if (enable)
515 			cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
516 		else
517 			cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
518 		cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
519 	} else {
520 		cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
521 		if (enable)
522 			cfg |= CGXX_SPUX_CONTROL1_LBK;
523 		else
524 			cfg &= ~CGXX_SPUX_CONTROL1_LBK;
525 		cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
526 	}
527 	return 0;
528 }
529 
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
531 {
532 	struct cgx *cgx = cgx_get_pdata(cgx_id);
533 	struct lmac *lmac = lmac_pdata(lmac_id, cgx);
534 	u16 max_dmac = lmac->mac_to_index_bmap.max;
535 	struct mac_ops *mac_ops;
536 	int index, i;
537 	u64 cfg = 0;
538 	int id;
539 
540 	if (!cgx)
541 		return;
542 
543 	id = get_sequence_id_of_lmac(cgx, lmac_id);
544 
545 	mac_ops = cgx->mac_ops;
546 	if (enable) {
547 		/* Enable promiscuous mode on LMAC */
548 		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
549 		cfg &= ~CGX_DMAC_CAM_ACCEPT;
550 		cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
551 		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
552 
553 		for (i = 0; i < max_dmac; i++) {
554 			index = id * max_dmac + i;
555 			cfg = cgx_read(cgx, 0,
556 				       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
557 			cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
558 			cgx_write(cgx, 0,
559 				  (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
560 		}
561 	} else {
562 		/* Disable promiscuous mode */
563 		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
564 		cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
565 		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
566 		for (i = 0; i < max_dmac; i++) {
567 			index = id * max_dmac + i;
568 			cfg = cgx_read(cgx, 0,
569 				       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
570 			if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
571 				cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
572 				cgx_write(cgx, 0,
573 					  (CGXX_CMRX_RX_DMAC_CAM0 +
574 					   index * 0x8),
575 					  cfg);
576 			}
577 		}
578 	}
579 }
580 
static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
					 u8 *tx_pause, u8 *rx_pause)
583 {
584 	struct cgx *cgx = cgxd;
585 	u64 cfg;
586 
587 	if (is_dev_rpm(cgx))
588 		return 0;
589 
590 	if (!is_lmac_valid(cgx, lmac_id))
591 		return -ENODEV;
592 
593 	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
594 	*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
595 
596 	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
597 	*tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
598 	return 0;
599 }
600 
601 /* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
603 {
604 	struct cgx *cgx = cgxd;
605 	u8 rx_pause, tx_pause;
606 	bool is_pfc_enabled;
607 	struct lmac *lmac;
608 	u64 cfg;
609 
610 	if (!cgx)
611 		return;
612 
613 	lmac = lmac_pdata(lmac_id, cgx);
614 	if (!lmac)
615 		return;
616 
	/* Pause frames are not enabled, just return */
618 	if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
619 		return;
620 
621 	cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
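	/* 802.3x pause and PFC are mutually exclusive; if link level rx pause
	 * is not enabled then PFC is assumed to be in use.
	 */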
622 	is_pfc_enabled = rx_pause ? false : true;
623 
624 	if (enable) {
625 		if (!is_pfc_enabled) {
626 			cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
627 			cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
628 			cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
629 
630 			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
631 			cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
632 			cgx_write(cgx, lmac_id,	CGXX_SMUX_RX_FRM_CTL, cfg);
633 		} else {
634 			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
635 			cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
636 			cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
637 		}
638 	} else {
639 
640 		if (!is_pfc_enabled) {
641 			cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
642 			cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
643 			cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
644 
645 			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
646 			cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
647 			cgx_write(cgx, lmac_id,	CGXX_SMUX_RX_FRM_CTL, cfg);
648 		} else {
649 			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
650 			cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
651 			cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
652 		}
653 	}
654 }
655 
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
657 {
658 	struct cgx *cgx = cgxd;
659 
660 	if (!is_lmac_valid(cgx, lmac_id))
661 		return -ENODEV;
662 	*rx_stat =  cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
663 	return 0;
664 }
665 
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
667 {
668 	struct cgx *cgx = cgxd;
669 
670 	if (!is_lmac_valid(cgx, lmac_id))
671 		return -ENODEV;
672 	*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
673 	return 0;
674 }
675 
u64 cgx_features_get(void *cgxd)
677 {
678 	return ((struct cgx *)cgxd)->hw_features;
679 }
680 
static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
682 {
683 	if (!linfo->fec)
684 		return 0;
685 
686 	switch (linfo->lmac_type_id) {
687 	case LMAC_MODE_SGMII:
688 	case LMAC_MODE_XAUI:
689 	case LMAC_MODE_RXAUI:
690 	case LMAC_MODE_QSGMII:
691 		return 0;
692 	case LMAC_MODE_10G_R:
693 	case LMAC_MODE_25G_R:
694 	case LMAC_MODE_100G_R:
695 	case LMAC_MODE_USXGMII:
696 		return 1;
697 	case LMAC_MODE_40G_R:
698 		return 4;
699 	case LMAC_MODE_50G_R:
700 		if (linfo->fec == OTX2_FEC_BASER)
701 			return 2;
702 		else
703 			return 1;
704 	default:
705 		return 0;
706 	}
707 }
708 
int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
710 {
711 	int stats, fec_stats_count = 0;
712 	int corr_reg, uncorr_reg;
713 	struct cgx *cgx = cgxd;
714 
715 	if (!cgx || lmac_id >= cgx->lmac_count)
716 		return -ENODEV;
717 	fec_stats_count =
718 		cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
719 	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
720 		corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
721 		uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
722 	} else {
723 		corr_reg = CGXX_SPUX_RSFEC_CORR;
724 		uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
725 	}
726 	for (stats = 0; stats < fec_stats_count; stats++) {
727 		rsp->fec_corr_blks +=
728 			cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
729 		rsp->fec_uncorr_blks +=
730 			cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
731 	}
732 	return 0;
733 }
734 
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
736 {
737 	struct cgx *cgx = cgxd;
738 	u64 cfg;
739 
740 	if (!is_lmac_valid(cgx, lmac_id))
741 		return -ENODEV;
742 
743 	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
744 	if (enable)
745 		cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
746 	else
747 		cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
748 	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
749 	return 0;
750 }
751 
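/* Enable/disable packet transmission on this LMAC and return the
 * previous TX enable state.
 */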
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
753 {
754 	struct cgx *cgx = cgxd;
755 	u64 cfg, last;
756 
757 	if (!is_lmac_valid(cgx, lmac_id))
758 		return -ENODEV;
759 
760 	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
761 	last = cfg;
762 	if (enable)
763 		cfg |= DATA_PKT_TX_EN;
764 	else
765 		cfg &= ~DATA_PKT_TX_EN;
766 
767 	if (cfg != last)
768 		cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
769 	return !!(last & DATA_PKT_TX_EN);
770 }
771 
static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
				     u8 tx_pause, u8 rx_pause)
774 {
775 	struct cgx *cgx = cgxd;
776 	u64 cfg;
777 
778 	if (is_dev_rpm(cgx))
779 		return 0;
780 
781 	if (!is_lmac_valid(cgx, lmac_id))
782 		return -ENODEV;
783 
784 	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
785 	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
786 	cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
787 	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
788 
789 	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
790 	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
791 	cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
792 	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
793 
794 	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
795 	if (tx_pause) {
796 		cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
797 	} else {
798 		cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
799 		cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
800 	}
801 	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
802 	return 0;
803 }
804 
static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
806 {
807 	struct cgx *cgx = cgxd;
808 	u64 cfg;
809 
810 	if (!is_lmac_valid(cgx, lmac_id))
811 		return;
812 
813 	if (enable) {
814 		/* Set pause time and interval */
815 		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
816 			  DEFAULT_PAUSE_TIME);
817 		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
818 		cfg &= ~0xFFFFULL;
819 		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
820 			  cfg | (DEFAULT_PAUSE_TIME / 2));
821 
822 		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
823 			  DEFAULT_PAUSE_TIME);
824 
825 		cfg = cgx_read(cgx, lmac_id,
826 			       CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
827 		cfg &= ~0xFFFFULL;
828 		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
829 			  cfg | (DEFAULT_PAUSE_TIME / 2));
830 	}
831 
832 	/* ALL pause frames received are completely ignored */
833 	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
834 	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
835 	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
836 
837 	cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
838 	cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
839 	cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
840 
841 	/* Disable pause frames transmission */
842 	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
843 	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
844 	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
845 
846 	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
847 	cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
848 	cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
849 	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
850 }
851 
int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
		       int pfvf_idx)
854 {
855 	struct cgx *cgx = cgxd;
856 	struct lmac *lmac;
857 
858 	lmac = lmac_pdata(lmac_id, cgx);
859 	if (!lmac)
860 		return -ENODEV;
861 
862 	if (!rx_pause)
863 		clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
864 	else
865 		set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
866 
867 	if (!tx_pause)
868 		clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
869 	else
870 		set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
871 
872 	/* check if other pfvfs are using flow control */
873 	if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
874 		dev_warn(&cgx->pdev->dev,
			 "Receive Flow control disable not permitted as it is used by other PFVFs\n");
876 		return -EPERM;
877 	}
878 
879 	if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
880 		dev_warn(&cgx->pdev->dev,
			 "Transmit Flow control disable not permitted as it is used by other PFVFs\n");
882 		return -EPERM;
883 	}
884 
885 	return 0;
886 }
887 
int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
			u8 rx_pause, u16 pfc_en)
890 {
891 	struct cgx *cgx = cgxd;
892 	u64 cfg;
893 
894 	if (!is_lmac_valid(cgx, lmac_id))
895 		return -ENODEV;
896 
897 	/* Return as no traffic classes are requested */
898 	if (tx_pause && !pfc_en)
899 		return 0;
900 
901 	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
902 
903 	if (rx_pause) {
904 		cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
905 			CGXX_SMUX_CBFC_CTL_BCK_EN |
906 			CGXX_SMUX_CBFC_CTL_DRP_EN);
907 	} else {
908 		cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
909 			CGXX_SMUX_CBFC_CTL_BCK_EN |
910 			CGXX_SMUX_CBFC_CTL_DRP_EN);
911 	}
912 
913 	if (tx_pause)
914 		cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
915 	else
916 		cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
917 
918 	cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
919 
920 	cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
921 
922 	/* Write source MAC address which will be filled into PFC packet */
923 	cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
924 	cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);
925 
926 	return 0;
927 }
928 
int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
			     u8 *rx_pause)
931 {
932 	struct cgx *cgx = cgxd;
933 	u64 cfg;
934 
935 	if (!is_lmac_valid(cgx, lmac_id))
936 		return -ENODEV;
937 
938 	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
939 
940 	*rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
941 	*tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);
942 
943 	return 0;
944 }
945 
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
947 {
948 	struct cgx *cgx = cgxd;
949 	u64 cfg;
950 
951 	if (!cgx)
952 		return;
953 
954 	if (enable) {
955 		/* Enable inbound PTP timestamping */
956 		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
957 		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
958 		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
959 
960 		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
961 		cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
962 		cgx_write(cgx, lmac_id,	CGXX_SMUX_RX_FRM_CTL, cfg);
963 	} else {
		/* Disable inbound PTP timestamping */
965 		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
966 		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
967 		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
968 
969 		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
970 		cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
971 		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
972 	}
973 }
974 
975 /* CGX Firmware interface low level support */
int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
977 {
978 	struct cgx *cgx = lmac->cgx;
979 	struct device *dev;
980 	int err = 0;
981 	u64 cmd;
982 
983 	/* Ensure no other command is in progress */
984 	err = mutex_lock_interruptible(&lmac->cmd_lock);
985 	if (err)
986 		return err;
987 
988 	/* Ensure command register is free */
989 	cmd = cgx_read(cgx, lmac->lmac_id,  CGX_COMMAND_REG);
990 	if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
991 		err = -EBUSY;
992 		goto unlock;
993 	}
994 
995 	/* Update ownership in command request */
996 	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);
997 
998 	/* Mark this lmac as pending, before we start */
999 	lmac->cmd_pend = true;
1000 
1001 	/* Start command in hardware */
1002 	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);
1003 
1004 	/* Ensure command is completed without errors */
1005 	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
1006 				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
1007 		dev = &cgx->pdev->dev;
1008 		dev_err(dev, "cgx port %d:%d cmd timeout\n",
1009 			cgx->cgx_id, lmac->lmac_id);
1010 		err = -EIO;
1011 		goto unlock;
1012 	}
1013 
1014 	/* we have a valid command response */
1015 	smp_rmb(); /* Ensure the latest updates are visible */
1016 	*resp = lmac->resp;
1017 
1018 unlock:
1019 	mutex_unlock(&lmac->cmd_lock);
1020 
1021 	return err;
1022 }
1023 
int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
1025 {
1026 	struct lmac *lmac;
1027 	int err;
1028 
1029 	lmac = lmac_pdata(lmac_id, cgx);
1030 	if (!lmac)
1031 		return -ENODEV;
1032 
1033 	err = cgx_fwi_cmd_send(req, resp, lmac);
1034 
1035 	/* Check for valid response */
1036 	if (!err) {
1037 		if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
1038 			return -EIO;
1039 		else
1040 			return 0;
1041 	}
1042 
1043 	return err;
1044 }
1045 
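/* Map ethtool SPEED_* values to the firmware CGX_LINK_* speed encoding */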
static int cgx_link_usertable_index_map(int speed)
1047 {
1048 	switch (speed) {
1049 	case SPEED_10:
1050 		return CGX_LINK_10M;
1051 	case SPEED_100:
1052 		return CGX_LINK_100M;
1053 	case SPEED_1000:
1054 		return CGX_LINK_1G;
1055 	case SPEED_2500:
1056 		return CGX_LINK_2HG;
1057 	case SPEED_5000:
1058 		return CGX_LINK_5G;
1059 	case SPEED_10000:
1060 		return CGX_LINK_10G;
1061 	case SPEED_20000:
1062 		return CGX_LINK_20G;
1063 	case SPEED_25000:
1064 		return CGX_LINK_25G;
1065 	case SPEED_40000:
1066 		return CGX_LINK_40G;
1067 	case SPEED_50000:
1068 		return CGX_LINK_50G;
1069 	case 80000:
1070 		return CGX_LINK_80G;
1071 	case SPEED_100000:
1072 		return CGX_LINK_100G;
1073 	case SPEED_UNKNOWN:
1074 		return CGX_LINK_NONE;
1075 	}
1076 	return CGX_LINK_NONE;
1077 }
1078 
static void set_mod_args(struct cgx_set_link_mode_args *args,
			 u32 speed, u8 duplex, u8 autoneg, u64 mode)
1081 {
	/* Fill default values in case the user did not pass
	 * valid parameters
	 */
1085 	if (args->duplex == DUPLEX_UNKNOWN)
1086 		args->duplex = duplex;
1087 	if (args->speed == SPEED_UNKNOWN)
1088 		args->speed = speed;
1089 	if (args->an == AUTONEG_UNKNOWN)
1090 		args->an = autoneg;
1091 	args->mode = mode;
1092 	args->ports = 0;
1093 }
1094 
static void otx2_map_ethtool_link_modes(u64 bitmask,
					struct cgx_set_link_mode_args *args)
1097 {
1098 	switch (bitmask) {
1099 	case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
1100 		set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
1101 		break;
1102 	case  ETHTOOL_LINK_MODE_10baseT_Full_BIT:
1103 		set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
1104 		break;
1105 	case  ETHTOOL_LINK_MODE_100baseT_Half_BIT:
1106 		set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
1107 		break;
1108 	case  ETHTOOL_LINK_MODE_100baseT_Full_BIT:
1109 		set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
1110 		break;
1111 	case  ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
1112 		set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
1113 		break;
1114 	case  ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
1115 		set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
1116 		break;
1117 	case  ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
1118 		set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
1119 		break;
1120 	case  ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
1121 		set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
1122 		break;
1123 	case  ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
1124 		set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
1125 		break;
1126 	case  ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
1127 		set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
1128 		break;
1129 	case  ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
1130 		set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
1131 		break;
1132 	case  ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
1133 		set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
1134 		break;
1135 	case  ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
1136 		set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
1137 		break;
1138 	case  ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
1139 		set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
1140 		break;
1141 	case  ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
1142 		set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
1143 		break;
1144 	case  ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
1145 		set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
1146 		break;
1147 	case  ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
1148 		set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
1149 		break;
1150 	case  ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
1151 		set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
1152 		break;
1153 	case  ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
1154 		set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
1155 		break;
1156 	case  ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
1157 		set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
1158 		break;
1159 	case  ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
1160 		set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
1161 		break;
1162 	case  ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
1163 		set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
1164 		break;
1165 	case  ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
1166 		set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
1167 		break;
1168 	case  ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
1169 		set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
1170 		break;
1171 	case  ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
1172 		set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
1173 		break;
1174 	case  ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
1175 		set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
1176 		break;
1177 	default:
1178 		set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
1179 		break;
1180 	}
1181 }
1182 
static inline void link_status_user_format(u64 lstat,
					   struct cgx_link_user_info *linfo,
					   struct cgx *cgx, u8 lmac_id)
1186 {
1187 	const char *lmac_string;
1188 
1189 	linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
1190 	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
1191 	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
1192 	linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
1193 	linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
1194 	linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
1195 	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
1196 	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
1197 }
1198 
1199 /* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
					   struct lmac *lmac)
1202 {
1203 	struct cgx_link_user_info *linfo;
1204 	struct cgx *cgx = lmac->cgx;
1205 	struct cgx_link_event event;
1206 	struct device *dev;
1207 	int err_type;
1208 
1209 	dev = &cgx->pdev->dev;
1210 
1211 	link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
1212 	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);
1213 
1214 	event.cgx_id = cgx->cgx_id;
1215 	event.lmac_id = lmac->lmac_id;
1216 
1217 	/* update the local copy of link status */
1218 	lmac->link_info = event.link_uinfo;
1219 	linfo = &lmac->link_info;
1220 
1221 	if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
1222 		return;
1223 
1224 	/* Ensure callback doesn't get unregistered until we finish it */
1225 	spin_lock(&lmac->event_cb_lock);
1226 
1227 	if (!lmac->event_cb.notify_link_chg) {
1228 		dev_dbg(dev, "cgx port %d:%d Link change handler null",
1229 			cgx->cgx_id, lmac->lmac_id);
1230 		if (err_type != CGX_ERR_NONE) {
1231 			dev_err(dev, "cgx port %d:%d Link error %d\n",
1232 				cgx->cgx_id, lmac->lmac_id, err_type);
1233 		}
1234 		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
1235 			 cgx->cgx_id, lmac->lmac_id,
1236 			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
1237 		goto err;
1238 	}
1239 
1240 	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
1241 		dev_err(dev, "event notification failure\n");
1242 err:
1243 	spin_unlock(&lmac->event_cb_lock);
1244 }
1245 
static inline bool cgx_cmdresp_is_linkevent(u64 event)
1247 {
1248 	u8 id;
1249 
1250 	id = FIELD_GET(EVTREG_ID, event);
1251 	if (id == CGX_CMD_LINK_BRING_UP ||
1252 	    id == CGX_CMD_LINK_BRING_DOWN ||
1253 	    id == CGX_CMD_MODE_CHANGE)
1254 		return true;
1255 	else
1256 		return false;
1257 }
1258 
static inline bool cgx_event_is_linkevent(u64 event)
1260 {
1261 	if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
1262 		return true;
1263 	else
1264 		return false;
1265 }
1266 
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
1268 {
1269 	u64 event, offset, clear_bit;
1270 	struct lmac *lmac = data;
1271 	struct cgx *cgx;
1272 
1273 	cgx = lmac->cgx;
1274 
1275 	/* Clear SW_INT for RPM and CMR_INT for CGX */
1276 	offset     = cgx->mac_ops->int_register;
1277 	clear_bit  = cgx->mac_ops->int_ena_bit;
1278 
1279 	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);
1280 
1281 	if (!FIELD_GET(EVTREG_ACK, event))
1282 		return IRQ_NONE;
1283 
1284 	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
1285 	case CGX_EVT_CMD_RESP:
1286 		/* Copy the response. Since only one command is active at a
1287 		 * time, there is no way a response can get overwritten
1288 		 */
1289 		lmac->resp = event;
1290 		/* Ensure response is updated before thread context starts */
1291 		smp_wmb();
1292 
		/* There won't be separate events for link change initiated from
		 * software; hence report the command responses as events
		 */
1296 		if (cgx_cmdresp_is_linkevent(event))
1297 			cgx_link_change_handler(event, lmac);
1298 
1299 		/* Release thread waiting for completion  */
1300 		lmac->cmd_pend = false;
1301 		wake_up_interruptible(&lmac->wq_cmd_cmplt);
1302 		break;
1303 	case CGX_EVT_ASYNC:
1304 		if (cgx_event_is_linkevent(event))
1305 			cgx_link_change_handler(event, lmac);
1306 		break;
1307 	}
1308 
1309 	/* Any new event or command response will be posted by firmware
1310 	 * only after the current status is acked.
1311 	 * Ack the interrupt register as well.
1312 	 */
1313 	cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
1314 	cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);
1315 
1316 	return IRQ_HANDLED;
1317 }
1318 
1319 /* APIs for PHY management using CGX firmware interface */
1320 
1321 /* callback registration for hardware events like link change */
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
1323 {
1324 	struct cgx *cgx = cgxd;
1325 	struct lmac *lmac;
1326 
1327 	lmac = lmac_pdata(lmac_id, cgx);
1328 	if (!lmac)
1329 		return -ENODEV;
1330 
1331 	lmac->event_cb = *cb;
1332 
1333 	return 0;
1334 }
1335 
int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
1337 {
1338 	struct lmac *lmac;
1339 	unsigned long flags;
1340 	struct cgx *cgx = cgxd;
1341 
1342 	lmac = lmac_pdata(lmac_id, cgx);
1343 	if (!lmac)
1344 		return -ENODEV;
1345 
1346 	spin_lock_irqsave(&lmac->event_cb_lock, flags);
1347 	lmac->event_cb.notify_link_chg = NULL;
1348 	lmac->event_cb.data = NULL;
1349 	spin_unlock_irqrestore(&lmac->event_cb_lock, flags);
1350 
1351 	return 0;
1352 }
1353 
int cgx_get_fwdata_base(u64 *base)
1355 {
1356 	u64 req = 0, resp;
1357 	struct cgx *cgx;
1358 	int first_lmac;
1359 	int err;
1360 
1361 	cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
1362 	if (!cgx)
1363 		return -ENXIO;
1364 
1365 	first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
1366 	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
1367 	err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
1368 	if (!err)
1369 		*base = FIELD_GET(RESP_FWD_BASE, resp);
1370 
1371 	return err;
1372 }
1373 
int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
		      int cgx_id, int lmac_id)
1376 {
1377 	struct cgx *cgx = cgxd;
1378 	u64 req = 0, resp;
1379 
1380 	if (!cgx)
1381 		return -ENODEV;
1382 
1383 	if (args.mode)
1384 		otx2_map_ethtool_link_modes(args.mode, &args);
1385 	if (!args.speed && args.duplex && !args.an)
1386 		return -EINVAL;
1387 
1388 	req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
1389 	req = FIELD_SET(CMDMODECHANGE_SPEED,
1390 			cgx_link_usertable_index_map(args.speed), req);
1391 	req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
1392 	req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
1393 	req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
1394 	req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);
1395 
1396 	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
1397 }

int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
1399 {
1400 	u64 req = 0, resp;
1401 	struct cgx *cgx;
1402 	int err = 0;
1403 
1404 	cgx = cgx_get_pdata(cgx_id);
1405 	if (!cgx)
1406 		return -ENXIO;
1407 
1408 	req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
1409 	req = FIELD_SET(CMDSETFEC, fec, req);
1410 	err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
1411 	if (err)
1412 		return err;
1413 
1414 	cgx->lmac_idmap[lmac_id]->link_info.fec =
1415 			FIELD_GET(RESP_LINKSTAT_FEC, resp);
1416 	return cgx->lmac_idmap[lmac_id]->link_info.fec;
1417 }
1418 
int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
1420 {
1421 	struct cgx *cgx = cgxd;
1422 	u64 req = 0, resp;
1423 
1424 	if (!cgx)
1425 		return -ENODEV;
1426 
1427 	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
1428 	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
1429 }
1430 
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
1432 {
1433 	u64 req = 0;
1434 	u64 resp;
1435 
1436 	if (enable)
1437 		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
1438 	else
1439 		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
1440 
1441 	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
1442 }
1443 
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
1445 {
1446 	int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
1447 	u64 req = 0;
1448 
1449 	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
1450 	return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
1451 }
1452 
static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
1454 {
1455 	struct device *dev = &cgx->pdev->dev;
1456 	int major_ver, minor_ver;
1457 	u64 resp;
1458 	int err;
1459 
1460 	if (!cgx->lmac_count)
1461 		return 0;
1462 
1463 	err = cgx_fwi_read_version(&resp, cgx);
1464 	if (err)
1465 		return err;
1466 
1467 	major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
1468 	minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
1469 	dev_dbg(dev, "Firmware command interface version = %d.%d\n",
1470 		major_ver, minor_ver);
1471 	if (major_ver != CGX_FIRMWARE_MAJOR_VER)
1472 		return -EIO;
1473 	else
1474 		return 0;
1475 }
1476 
static void cgx_lmac_linkup_work(struct work_struct *work)
1478 {
1479 	struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
1480 	struct device *dev = &cgx->pdev->dev;
1481 	int i, err;
1482 
1483 	/* Do Link up for all the enabled lmacs */
1484 	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
1485 		err = cgx_fwi_link_change(cgx, i, true);
1486 		if (err)
1487 			dev_info(dev, "cgx port %d:%d Link up command failed\n",
1488 				 cgx->cgx_id, i);
1489 	}
1490 }
1491 
int cgx_lmac_linkup_start(void *cgxd)
1493 {
1494 	struct cgx *cgx = cgxd;
1495 
1496 	if (!cgx)
1497 		return -ENODEV;
1498 
1499 	queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
1500 
1501 	return 0;
1502 }
1503 
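/* Read the receive FIFO size from CGX_CONST and cache it in mac_ops */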
static void cgx_lmac_get_fifolen(struct cgx *cgx)
1505 {
1506 	u64 cfg;
1507 
1508 	cfg = cgx_read(cgx, 0, CGX_CONST);
1509 	cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
1510 }
1511 
static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
				   int cnt, bool req_free)
1514 {
1515 	struct mac_ops *mac_ops = cgx->mac_ops;
1516 	u64 offset, ena_bit;
1517 	unsigned int irq;
1518 	int err;
1519 
1520 	irq      = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
1521 				  cnt * mac_ops->irq_offset);
1522 	offset   = mac_ops->int_set_reg;
1523 	ena_bit  = mac_ops->int_ena_bit;
1524 
1525 	if (req_free) {
1526 		free_irq(irq, lmac);
1527 		return 0;
1528 	}
1529 
1530 	err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
1531 	if (err)
1532 		return err;
1533 
1534 	/* Enable interrupt */
1535 	cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
1536 	return 0;
1537 }
1538 
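/* Number of LMACs present on this CGX, as reported by CGXX_CMRX_RX_LMACS */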
int cgx_get_nr_lmacs(void *cgxd)
1540 {
1541 	struct cgx *cgx = cgxd;
1542 
1543 	return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
1544 }
1545 
u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
1547 {
1548 	struct cgx *cgx = cgxd;
1549 
1550 	return cgx->lmac_idmap[lmac_index]->lmac_id;
1551 }
1552 
unsigned long cgx_get_lmac_bmap(void *cgxd)
1554 {
1555 	struct cgx *cgx = cgxd;
1556 
1557 	return cgx->lmac_bmap;
1558 }
1559 
static int cgx_lmac_init(struct cgx *cgx)
1561 {
1562 	struct lmac *lmac;
1563 	u64 lmac_list;
1564 	int i, err;
1565 
1566 	cgx_lmac_get_fifolen(cgx);
1567 
1568 	cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
1569 	/* lmac_list specifies which lmacs are enabled
1570 	 * when bit n is set to 1, LMAC[n] is enabled
1571 	 */
1572 	if (cgx->mac_ops->non_contiguous_serdes_lane)
1573 		lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
1574 
1575 	if (cgx->lmac_count > MAX_LMAC_PER_CGX)
1576 		cgx->lmac_count = MAX_LMAC_PER_CGX;
1577 
1578 	for (i = 0; i < cgx->lmac_count; i++) {
1579 		lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
1580 		if (!lmac)
1581 			return -ENOMEM;
1582 		lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
1583 		if (!lmac->name) {
1584 			err = -ENOMEM;
1585 			goto err_lmac_free;
1586 		}
1587 		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
1588 		if (cgx->mac_ops->non_contiguous_serdes_lane) {
1589 			lmac->lmac_id = __ffs64(lmac_list);
1590 			lmac_list   &= ~BIT_ULL(lmac->lmac_id);
1591 		} else {
1592 			lmac->lmac_id = i;
1593 		}
1594 
1595 		lmac->cgx = cgx;
1596 		lmac->mac_to_index_bmap.max =
1597 				MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
1598 		err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
1599 		if (err)
1600 			goto err_name_free;
1601 
1602 		/* Reserve first entry for default MAC address */
1603 		set_bit(0, lmac->mac_to_index_bmap.bmap);
1604 
1605 		lmac->rx_fc_pfvf_bmap.max = 128;
1606 		err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
1607 		if (err)
1608 			goto err_dmac_bmap_free;
1609 
1610 		lmac->tx_fc_pfvf_bmap.max = 128;
1611 		err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
1612 		if (err)
1613 			goto err_rx_fc_bmap_free;
1614 
1615 		init_waitqueue_head(&lmac->wq_cmd_cmplt);
1616 		mutex_init(&lmac->cmd_lock);
1617 		spin_lock_init(&lmac->event_cb_lock);
1618 		err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
1619 		if (err)
1620 			goto err_bitmap_free;
1621 
1622 		/* Add reference */
1623 		cgx->lmac_idmap[lmac->lmac_id] = lmac;
1624 		set_bit(lmac->lmac_id, &cgx->lmac_bmap);
1625 		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
1626 	}
1627 
1628 	return cgx_lmac_verify_fwi_version(cgx);
1629 
1630 err_bitmap_free:
1631 	rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
1632 err_rx_fc_bmap_free:
1633 	rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
1634 err_dmac_bmap_free:
1635 	rvu_free_bitmap(&lmac->mac_to_index_bmap);
1636 err_name_free:
1637 	kfree(lmac->name);
1638 err_lmac_free:
1639 	kfree(lmac);
1640 	return err;
1641 }
1642 
static int cgx_lmac_exit(struct cgx *cgx)
1644 {
1645 	struct lmac *lmac;
1646 	int i;
1647 
1648 	if (cgx->cgx_cmd_workq) {
1649 		destroy_workqueue(cgx->cgx_cmd_workq);
1650 		cgx->cgx_cmd_workq = NULL;
1651 	}
1652 
1653 	/* Free all lmac related resources */
1654 	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
1655 		lmac = cgx->lmac_idmap[i];
1656 		if (!lmac)
1657 			continue;
1658 		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
1659 		cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
1660 		kfree(lmac->mac_to_index_bmap.bmap);
1661 		kfree(lmac->name);
1662 		kfree(lmac);
1663 	}
1664 
1665 	return 0;
1666 }
1667 
static void cgx_populate_features(struct cgx *cgx)
1669 {
1670 	if (is_dev_rpm(cgx))
1671 		cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
1672 				    RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
1673 	else
1674 		cgx->hw_features = (RVU_LMAC_FEAT_FC  | RVU_LMAC_FEAT_HIGIG2 |
1675 				    RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
1676 }
1677 
1678 static struct mac_ops	cgx_mac_ops    = {
1679 	.name		=       "cgx",
1680 	.csr_offset	=       0,
1681 	.lmac_offset    =       18,
1682 	.int_register	=       CGXX_CMRX_INT,
1683 	.int_set_reg	=       CGXX_CMRX_INT_ENA_W1S,
1684 	.irq_offset	=       9,
1685 	.int_ena_bit    =       FW_CGX_INT,
1686 	.lmac_fwi	=	CGX_LMAC_FWI,
1687 	.non_contiguous_serdes_lane = false,
1688 	.rx_stats_cnt   =       9,
1689 	.tx_stats_cnt   =       18,
1690 	.get_nr_lmacs	=	cgx_get_nr_lmacs,
1691 	.get_lmac_type  =       cgx_get_lmac_type,
1692 	.mac_lmac_intl_lbk =    cgx_lmac_internal_loopback,
1693 	.mac_get_rx_stats  =	cgx_get_rx_stats,
1694 	.mac_get_tx_stats  =	cgx_get_tx_stats,
1695 	.mac_enadis_rx_pause_fwding =	cgx_lmac_enadis_rx_pause_fwding,
1696 	.mac_get_pause_frm_status =	cgx_lmac_get_pause_frm_status,
1697 	.mac_enadis_pause_frm =		cgx_lmac_enadis_pause_frm,
1698 	.mac_pause_frm_config =		cgx_lmac_pause_frm_config,
1699 	.mac_enadis_ptp_config =	cgx_lmac_ptp_config,
1700 	.mac_rx_tx_enable =		cgx_lmac_rx_tx_enable,
1701 	.mac_tx_enable =		cgx_lmac_tx_enable,
1702 	.pfc_config =                   cgx_lmac_pfc_config,
1703 	.mac_get_pfc_frm_cfg   =        cgx_lmac_get_pfc_frm_cfg,
1704 };
1705 
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1707 {
1708 	struct device *dev = &pdev->dev;
1709 	struct cgx *cgx;
1710 	int err, nvec;
1711 
1712 	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
1713 	if (!cgx)
1714 		return -ENOMEM;
1715 	cgx->pdev = pdev;
1716 
1717 	pci_set_drvdata(pdev, cgx);
1718 
1719 	/* Use mac_ops to get MAC specific features */
1720 	if (pdev->device == PCI_DEVID_CN10K_RPM)
1721 		cgx->mac_ops = rpm_get_mac_ops();
1722 	else
1723 		cgx->mac_ops = &cgx_mac_ops;
1724 
1725 	err = pci_enable_device(pdev);
1726 	if (err) {
1727 		dev_err(dev, "Failed to enable PCI device\n");
1728 		pci_set_drvdata(pdev, NULL);
1729 		return err;
1730 	}
1731 
1732 	err = pci_request_regions(pdev, DRV_NAME);
1733 	if (err) {
1734 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
1735 		goto err_disable_device;
1736 	}
1737 
1738 	/* MAP configuration registers */
1739 	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
1740 	if (!cgx->reg_base) {
1741 		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
1742 		err = -ENOMEM;
1743 		goto err_release_regions;
1744 	}
1745 
1746 	nvec = pci_msix_vec_count(cgx->pdev);
1747 	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
1748 	if (err < 0 || err != nvec) {
1749 		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
1750 			nvec, err);
1751 		goto err_release_regions;
1752 	}
1753 
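	/* Derive this CGX's id from its BAR start address */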
1754 	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
1755 		& CGX_ID_MASK;
1756 
1757 	/* init wq for processing linkup requests */
1758 	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
1759 	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
1760 	if (!cgx->cgx_cmd_workq) {
1761 		dev_err(dev, "alloc workqueue failed for cgx cmd");
1762 		err = -ENOMEM;
1763 		goto err_free_irq_vectors;
1764 	}
1765 
1766 	list_add(&cgx->cgx_list, &cgx_list);
1767 
1768 
1769 	cgx_populate_features(cgx);
1770 
1771 	mutex_init(&cgx->lock);
1772 
1773 	err = cgx_lmac_init(cgx);
1774 	if (err)
1775 		goto err_release_lmac;
1776 
1777 	return 0;
1778 
1779 err_release_lmac:
1780 	cgx_lmac_exit(cgx);
1781 	list_del(&cgx->cgx_list);
1782 err_free_irq_vectors:
1783 	pci_free_irq_vectors(pdev);
1784 err_release_regions:
1785 	pci_release_regions(pdev);
1786 err_disable_device:
1787 	pci_disable_device(pdev);
1788 	pci_set_drvdata(pdev, NULL);
1789 	return err;
1790 }
1791 
static void cgx_remove(struct pci_dev *pdev)
1793 {
1794 	struct cgx *cgx = pci_get_drvdata(pdev);
1795 
1796 	if (cgx) {
1797 		cgx_lmac_exit(cgx);
1798 		list_del(&cgx->cgx_list);
1799 	}
1800 	pci_free_irq_vectors(pdev);
1801 	pci_release_regions(pdev);
1802 	pci_disable_device(pdev);
1803 	pci_set_drvdata(pdev, NULL);
1804 }
1805 
1806 struct pci_driver cgx_driver = {
1807 	.name = DRV_NAME,
1808 	.id_table = cgx_id_table,
1809 	.probe = cgx_probe,
1810 	.remove = cgx_remove,
1811 };
1812