/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <bcmdefs.h>
#include <bcmutils.h>
#include <siutils.h>
#include <hndsoc.h>
#include <bcmdevs.h>
#include <sbchipc.h>
#include <pci_core.h>
#include <pcie_core.h>
#include <nicpci.h>
#include <pcicfg.h>

typedef struct {
	union {
		sbpcieregs_t *pcieregs;
		struct sbpciregs *pciregs;
	} regs;			/* Memory mapped register to the core */

	si_t *sih;		/* System interconnect handle */
	struct pci_dev *dev;
	u8 pciecap_lcreg_offset;	/* PCIE capability LCreg offset in the config space */
	bool pcie_pr42767;
	u8 pcie_polarity;
	u8 pcie_war_aspm_ovr;	/* Override ASPM/Clkreq settings */

	u8 pmecap_offset;	/* PM Capability offset in the config space */
	bool pmecap;		/* Capable of generating PME */
} pcicore_info_t;

/* debug/trace */
#define	PCI_ERROR(args)
#define PCIE_PUB(sih) \
	(((sih)->bustype == PCI_BUS) && ((sih)->buscoretype == PCIE_CORE_ID))

/* routines to access mdio slave device registers */
static bool pcie_mdiosetblock(pcicore_info_t *pi, uint blk);
static int pcie_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr,
		       bool write, uint *val);
static int pcie_mdiowrite(pcicore_info_t *pi, uint physmedia, uint readdr,
			  uint val);
static int pcie_mdioread(pcicore_info_t *pi, uint physmedia, uint readdr,
			 uint *ret_val);

static void pcie_extendL1timer(pcicore_info_t *pi, bool extend);
static void pcie_clkreq_upd(pcicore_info_t *pi, uint state);

static void pcie_war_aspm_clkreq(pcicore_info_t *pi);
static void pcie_war_serdes(pcicore_info_t *pi);
static void pcie_war_noplldown(pcicore_info_t *pi);
static void pcie_war_polarity(pcicore_info_t *pi);
static void pcie_war_pci_setup(pcicore_info_t *pi);

static bool pcicore_pmecap(pcicore_info_t *pi);

#define PCIE_ASPM(sih)	((PCIE_PUB(sih)) && (((sih)->buscorerev >= 3) && ((sih)->buscorerev <= 5)))


/* delay needed between the mdio control/mdiodata register accesses */
#define PR28829_DELAY() udelay(10)

/* Initialize the PCI core. It is the caller's responsibility to make sure
 * that this is done only once.
 */
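/* Typical call sequence (illustrative sketch only, not taken from this file;
 * the real call sites live in the bus/attach glue and the state constants
 * passed in may differ):
 *
 *	void *pch = pcicore_init(sih, pdev, regs);
 *	pcicore_attach(pch, pvars, SI_DOATTACH);
 *	pcicore_hwup(pch);
 *	...
 *	pcicore_deinit(pch);
 */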
void *pcicore_init(si_t *sih, void *pdev, void *regs)
{
	pcicore_info_t *pi;

	ASSERT(sih->bustype == PCI_BUS);

	/* alloc pcicore_info_t */
	pi = kzalloc(sizeof(pcicore_info_t), GFP_ATOMIC);
	if (pi == NULL) {
		PCI_ERROR(("pci_attach: malloc failed!\n"));
		return NULL;
	}

	pi->sih = sih;
	pi->dev = pdev;

	if (sih->buscoretype == PCIE_CORE_ID) {
		u8 cap_ptr;
		pi->regs.pcieregs = (sbpcieregs_t *) regs;
		cap_ptr =
		    pcicore_find_pci_capability(pi->dev, PCI_CAP_PCIECAP_ID,
						NULL, NULL);
		ASSERT(cap_ptr);
		pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
	} else
		pi->regs.pciregs = (struct sbpciregs *) regs;

	return pi;
}

void pcicore_deinit(void *pch)
{
	pcicore_info_t *pi = (pcicore_info_t *) pch;

	if (pi == NULL)
		return;
	kfree(pi);
}

/* return cap_offset if the requested capability exists in the PCI config space */
/* Note that it is the caller's responsibility to make sure this is a PCI bus */
u8
pcicore_find_pci_capability(void *dev, u8 req_cap_id,
			    unsigned char *buf, u32 *buflen)
{
	u8 cap_id;
	u8 cap_ptr = 0;
	u32 bufsize;
	u8 byte_val;

	/* check for Header type 0 */
	pci_read_config_byte(dev, PCI_CFG_HDR, &byte_val);
	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)
		goto end;

	/* check if the capability pointer field exists */
	pci_read_config_byte(dev, PCI_CFG_STAT, &byte_val);
	if (!(byte_val & PCI_CAPPTR_PRESENT))
		goto end;

	pci_read_config_byte(dev, PCI_CFG_CAPPTR, &cap_ptr);
	/* check if the capability pointer is 0x00 */
	if (cap_ptr == 0x00)
		goto end;

	/* loop through the capability list and see if the requested capability exists */

	pci_read_config_byte(dev, cap_ptr, &cap_id);

	while (cap_id != req_cap_id) {
		pci_read_config_byte(dev, cap_ptr + 1, &cap_ptr);
		if (cap_ptr == 0x00)
			break;
		pci_read_config_byte(dev, cap_ptr, &cap_id);
	}
	if (cap_id != req_cap_id)
		goto end;

	/* found the caller requested capability */
	if ((buf != NULL) && (buflen != NULL)) {
		u8 cap_data;

		bufsize = *buflen;
		if (!bufsize)
			goto end;
		*buflen = 0;
		/* copy the capability data excluding the cap ID and next pointer */
		cap_data = cap_ptr + 2;
		if ((bufsize + cap_data) > SZPCR)
			bufsize = SZPCR - cap_data;
		*buflen = bufsize;
		while (bufsize--) {
			pci_read_config_byte(dev, cap_data, buf);
			cap_data++;
			buf++;
		}
	}
 end:
	return cap_ptr;
}

/* ***** Register Access API ***** */
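/* Read a PCIE core register through the indirect access window: the offset is
 * written to configaddr/pcieindaddr and the value is read back from
 * configdata/pcieinddata, depending on addrtype.
 */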
uint
pcie_readreg(sbpcieregs_t *pcieregs, uint addrtype,
	     uint offset)
{
	uint retval = 0xFFFFFFFF;

	ASSERT(pcieregs != NULL);

	switch (addrtype) {
	case PCIE_CONFIGREGS:
		W_REG((&pcieregs->configaddr), offset);
		(void)R_REG((&pcieregs->configaddr));
		retval = R_REG(&(pcieregs->configdata));
		break;
	case PCIE_PCIEREGS:
		W_REG(&(pcieregs->pcieindaddr), offset);
		(void)R_REG((&pcieregs->pcieindaddr));
		retval = R_REG(&(pcieregs->pcieinddata));
		break;
	default:
		ASSERT(0);
		break;
	}

	return retval;
}

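/* Write a PCIE core register through the same indirect access window used by
 * pcie_readreg(); always returns 0.
 */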
uint
pcie_writereg(sbpcieregs_t *pcieregs, uint addrtype,
	      uint offset, uint val)
{
	ASSERT(pcieregs != NULL);

	switch (addrtype) {
	case PCIE_CONFIGREGS:
		W_REG((&pcieregs->configaddr), offset);
		W_REG((&pcieregs->configdata), val);
		break;
	case PCIE_PCIEREGS:
		W_REG((&pcieregs->pcieindaddr), offset);
		W_REG((&pcieregs->pcieinddata), val);
		break;
	default:
		ASSERT(0);
		break;
	}
	return 0;
}

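/* Select the MDIO register block on newer serdes by writing the block address
 * through the mdiodata register, then poll mdiocontrol until the access
 * completes (up to ~200 ms).
 */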
static bool pcie_mdiosetblock(pcicore_info_t *pi, uint blk)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint mdiodata, i = 0;
	uint pcie_serdes_spinwait = 200;

	mdiodata =
	    MDIODATA_START | MDIODATA_WRITE | (MDIODATA_DEV_ADDR <<
					       MDIODATA_DEVADDR_SHF) |
	    (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) | MDIODATA_TA | (blk <<
									 4);
	W_REG(&pcieregs->mdiodata, mdiodata);

	PR28829_DELAY();
	/* retry till the transaction is complete */
	while (i < pcie_serdes_spinwait) {
		if (R_REG(&(pcieregs->mdiocontrol)) &
		    MDIOCTL_ACCESS_DONE) {
			break;
		}
		udelay(1000);
		i++;
	}

	if (i >= pcie_serdes_spinwait) {
		PCI_ERROR(("pcie_mdiosetblock: timed out\n"));
		return false;
	}

	return true;
}

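/* Perform a single MDIO read or write to the serdes: enable MDIO access,
 * issue the transaction (via block addressing on rev >= 10 cores), poll for
 * completion and disable MDIO access again. Returns 0 on success, 1 on
 * timeout.
 */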
static int
pcie_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write,
	    uint *val)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	uint mdiodata;
	uint i = 0;
	uint pcie_serdes_spinwait = 10;

	/* enable mdio access to SERDES */
	W_REG((&pcieregs->mdiocontrol),
	      MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);

	if (pi->sih->buscorerev >= 10) {
		/* new serdes is slower in rw, using two layers of reg address mapping */
		if (!pcie_mdiosetblock(pi, physmedia))
			return 1;
		mdiodata = (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
		    (regaddr << MDIODATA_REGADDR_SHF);
		pcie_serdes_spinwait *= 20;
	} else {
		mdiodata = (physmedia << MDIODATA_DEVADDR_SHF_OLD) |
		    (regaddr << MDIODATA_REGADDR_SHF_OLD);
	}

	if (!write)
		mdiodata |= (MDIODATA_START | MDIODATA_READ | MDIODATA_TA);
	else
		mdiodata |=
		    (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA | *val);

	W_REG(&pcieregs->mdiodata, mdiodata);

	PR28829_DELAY();

	/* retry till the transaction is complete */
	while (i < pcie_serdes_spinwait) {
		if (R_REG(&(pcieregs->mdiocontrol)) &
		    MDIOCTL_ACCESS_DONE) {
			if (!write) {
				PR28829_DELAY();
				*val =
				    (R_REG(&(pcieregs->mdiodata)) &
				     MDIODATA_MASK);
			}
			/* Disable mdio access to SERDES */
			W_REG((&pcieregs->mdiocontrol), 0);
			return 0;
		}
		udelay(1000);
		i++;
	}

	PCI_ERROR(("pcie_mdioop: timed out op: %d\n", write));
	/* Disable mdio access to SERDES */
	W_REG((&pcieregs->mdiocontrol), 0);
	return 1;
}

/* use the mdio interface to read from mdio slaves */
static int
pcie_mdioread(pcicore_info_t *pi, uint physmedia, uint regaddr, uint *regval)
{
	return pcie_mdioop(pi, physmedia, regaddr, false, regval);
}

/* use the mdio interface to write to mdio slaves */
static int
pcie_mdiowrite(pcicore_info_t *pi, uint physmedia, uint regaddr, uint val)
{
	return pcie_mdioop(pi, physmedia, regaddr, true, &val);
}

/* ***** Support functions ***** */
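/* Query and optionally set the CLKREQ enable bit in the PCIE Link Control
 * register; returns 1 if CLKREQ ends up enabled, 0 otherwise (or if the
 * capability offset is unknown).
 */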
u8 pcie_clkreq(void *pch, u32 mask, u32 val)
{
	pcicore_info_t *pi = (pcicore_info_t *) pch;
	u32 reg_val;
	u8 offset;

	offset = pi->pciecap_lcreg_offset;
	if (!offset)
		return 0;

	pci_read_config_dword(pi->dev, offset, &reg_val);
	/* set operation */
	if (mask) {
		if (val)
			reg_val |= PCIE_CLKREQ_ENAB;
		else
			reg_val &= ~PCIE_CLKREQ_ENAB;
		pci_write_config_dword(pi->dev, offset, reg_val);
		pci_read_config_dword(pi->dev, offset, &reg_val);
	}
	if (reg_val & PCIE_CLKREQ_ENAB)
		return 1;
	else
		return 0;
}

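/* Extend or restore the ASPM L1 entry timer (PCIE_ASPMTIMER_EXTEND in the
 * DLLP PM threshold register) on rev >= 7 PCIE cores.
 */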
static void pcie_extendL1timer(pcicore_info_t *pi, bool extend)
{
	u32 w;
	si_t *sih = pi->sih;
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;

	if (!PCIE_PUB(sih) || sih->buscorerev < 7)
		return;

	w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
	if (extend)
		w |= PCIE_ASPMTIMER_EXTEND;
	else
		w &= ~PCIE_ASPMTIMER_EXTEND;
	pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
	w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
}

/* centralized clkreq control policy */
static void pcie_clkreq_upd(pcicore_info_t *pi, uint state)
{
	si_t *sih = pi->sih;
	ASSERT(PCIE_PUB(sih));

	switch (state) {
	case SI_DOATTACH:
		if (PCIE_ASPM(sih))
			pcie_clkreq((void *)pi, 1, 0);
		break;
	case SI_PCIDOWN:
		if (sih->buscorerev == 6) {	/* turn on serdes PLL down */
			si_corereg(sih, SI_CC_IDX,
				   offsetof(chipcregs_t, chipcontrol_addr), ~0,
				   0);
			si_corereg(sih, SI_CC_IDX,
				   offsetof(chipcregs_t, chipcontrol_data),
				   ~0x40, 0);
		} else if (pi->pcie_pr42767) {
			pcie_clkreq((void *)pi, 1, 1);
		}
		break;
	case SI_PCIUP:
		if (sih->buscorerev == 6) {	/* turn off serdes PLL down */
			si_corereg(sih, SI_CC_IDX,
				   offsetof(chipcregs_t, chipcontrol_addr), ~0,
				   0);
			si_corereg(sih, SI_CC_IDX,
				   offsetof(chipcregs_t, chipcontrol_data),
				   ~0x40, 0x40);
		} else if (PCIE_ASPM(sih)) {	/* disable clkreq */
			pcie_clkreq((void *)pi, 1, 0);
		}
		break;
	default:
		ASSERT(0);
		break;
	}
}

/* ***** PCI core WARs ***** */
/* Done only once at attach time */
static void pcie_war_polarity(pcicore_info_t *pi)
{
	u32 w;

	if (pi->pcie_polarity != 0)
		return;

	w = pcie_readreg(pi->regs.pcieregs, PCIE_PCIEREGS,
			 PCIE_PLP_STATUSREG);

	/* Detect the current polarity at attach and force that polarity and
	 * disable changing the polarity
	 */
	if ((w & PCIE_PLP_POLARITYINV_STAT) == 0)
		pi->pcie_polarity = (SERDES_RX_CTRL_FORCE);
	else
		pi->pcie_polarity =
		    (SERDES_RX_CTRL_FORCE | SERDES_RX_CTRL_POLARITY);
}

/* enable ASPM and CLKREQ if srom doesn't have it */
/* Needs to happen when update to shadow SROM is needed
 *   : Coming out of 'standby'/'hibernate'
 *   : If pcie_war_aspm_ovr state changed
 */
static void pcie_war_aspm_clkreq(pcicore_info_t *pi)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	si_t *sih = pi->sih;
	u16 val16, *reg16;
	u32 w;

	if (!PCIE_ASPM(sih))
		return;

	/* bypass this on QT or VSIM */
	if (!ISSIM_ENAB(sih)) {

		reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
		val16 = R_REG(reg16);

		val16 &= ~SRSH_ASPM_ENB;
		if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
			val16 |= SRSH_ASPM_ENB;
		else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L1_ENAB)
			val16 |= SRSH_ASPM_L1_ENB;
		else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
			val16 |= SRSH_ASPM_L0s_ENB;

		W_REG(reg16, val16);

		pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset,
					&w);
		w &= ~PCIE_ASPM_ENAB;
		w |= pi->pcie_war_aspm_ovr;
		pci_write_config_dword(pi->dev,
					pi->pciecap_lcreg_offset, w);
	}

	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
	val16 = R_REG(reg16);

	if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
		val16 |= SRSH_CLKREQ_ENB;
		pi->pcie_pr42767 = true;
	} else
		val16 &= ~SRSH_CLKREQ_ENB;

	W_REG(reg16, val16);
}

/* Apply the polarity determined at the start */
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_serdes(pcicore_info_t *pi)
{
	u32 w = 0;

	if (pi->pcie_polarity != 0)
		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CTRL,
			       pi->pcie_polarity);

	pcie_mdioread(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, &w);
	if (w & PLL_CTRL_FREQDET_EN) {
		w &= ~PLL_CTRL_FREQDET_EN;
		pcie_mdiowrite(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, w);
	}
}

/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_misc_config_fixup(pcicore_info_t *pi)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	u16 val16, *reg16;

	reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG];
	val16 = R_REG(reg16);

	if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
		val16 |= SRSH_L23READY_EXIT_NOPERST;
		W_REG(reg16, val16);
	}
}

/* quick hack for testing */
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_noplldown(pcicore_info_t *pi)
{
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	u16 *reg16;

	ASSERT(pi->sih->buscorerev == 7);

	/* turn off serdes PLL down */
	si_corereg(pi->sih, SI_CC_IDX, offsetof(chipcregs_t, chipcontrol),
		   CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);

	/* clear srom shadow backdoor */
	reg16 = &pcieregs->sprom[SRSH_BD_OFFSET];
	W_REG(reg16, 0);
}

/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_pci_setup(pcicore_info_t *pi)
{
	si_t *sih = pi->sih;
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
	u32 w;

	if ((sih->buscorerev == 0) || (sih->buscorerev == 1)) {
		w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
				 PCIE_TLP_WORKAROUNDSREG);
		w |= 0x8;
		pcie_writereg(pcieregs, PCIE_PCIEREGS,
			      PCIE_TLP_WORKAROUNDSREG, w);
	}

	if (sih->buscorerev == 1) {
		w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
		w |= (0x40);
		pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
	}

	if (sih->buscorerev == 0) {
		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
	} else if (PCIE_ASPM(sih)) {
		/* Change the L1 threshold for better performance */
		w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
				 PCIE_DLLP_PMTHRESHREG);
		w &= ~(PCIE_L1THRESHOLDTIME_MASK);
		w |= (PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT);
		pcie_writereg(pcieregs, PCIE_PCIEREGS,
			      PCIE_DLLP_PMTHRESHREG, w);

		pcie_war_serdes(pi);

		pcie_war_aspm_clkreq(pi);
	} else if (pi->sih->buscorerev == 7)
		pcie_war_noplldown(pi);

	/* Note that the fix is actually in the SROM, that's why this is open-ended */
	if (pi->sih->buscorerev >= 6)
		pcie_misc_config_fixup(pi);
}

void pcie_war_ovr_aspm_update(void *pch, u8 aspm)
{
	pcicore_info_t *pi = (pcicore_info_t *) pch;

	if (!PCIE_ASPM(pi->sih))
		return;

	/* Validate */
	if (aspm > PCIE_ASPM_ENAB)
		return;

	pi->pcie_war_aspm_ovr = aspm;

	/* Update the current state */
	pcie_war_aspm_clkreq(pi);
}

/* ***** Functions called during driver state changes ***** */
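/* Apply the attach-time workarounds: pick the ASPM override from the board
 * flags, then run the polarity, serdes and ASPM/CLKREQ WARs in that order and
 * update the clkreq policy for the given state.
 */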
void pcicore_attach(void *pch, char *pvars, int state)
{
	pcicore_info_t *pi = (pcicore_info_t *) pch;
	si_t *sih = pi->sih;

	/* Determine if this board needs override */
	if (PCIE_ASPM(sih)) {
		if ((u32) getintvar(pvars, "boardflags2") & BFL2_PCIEWAR_OVR) {
			pi->pcie_war_aspm_ovr = PCIE_ASPM_DISAB;
		} else {
			pi->pcie_war_aspm_ovr = PCIE_ASPM_ENAB;
		}
	}

	/* These need to happen in this order only */
	pcie_war_polarity(pi);

	pcie_war_serdes(pi);

	pcie_war_aspm_clkreq(pi);

	pcie_clkreq_upd(pi, state);
}

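/* Re-apply the PCIE core register workarounds after the hardware has been
 * (re)initialised.
 */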
void pcicore_hwup(void *pch)
{
	pcicore_info_t *pi = (pcicore_info_t *) pch;

	if (!pi || !PCIE_PUB(pi->sih))
		return;

	pcie_war_pci_setup(pi);
}

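/* Called when the core comes up: extend the L1 timer for performance and
 * update the clkreq policy for the new state.
 */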
void pcicore_up(void *pch, int state)
{
	pcicore_info_t *pi = (pcicore_info_t *) pch;

	if (!pi || !PCIE_PUB(pi->sih))
		return;

	/* Restore L1 timer for better performance */
	pcie_extendL1timer(pi, true);

	pcie_clkreq_upd(pi, state);
}

/* When the device is going to enter the D3 state (or the system is going to
 * enter the S3/S4 states), disable ASPM L1 in the Link Control register.
 */
void pcicore_sleep(void *pch)
{
	pcicore_info_t *pi = (pcicore_info_t *) pch;
	u32 w;

	if (!pi || !PCIE_ASPM(pi->sih))
		return;

	pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
	w &= ~PCIE_CAP_LCREG_ASPML1;
	pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);

	pi->pcie_pr42767 = false;
}

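/* Called when the core goes down: update the clkreq policy and shrink the L1
 * timer again to save power.
 */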
void pcicore_down(void *pch, int state)
{
	pcicore_info_t *pi = (pcicore_info_t *) pch;

	if (!pi || !PCIE_PUB(pi->sih))
		return;

	pcie_clkreq_upd(pi, state);

	/* Reduce L1 timer for better power savings */
	pcie_extendL1timer(pi, false);
}

/* ***** Wake-on-wireless-LAN (WOWL) support functions ***** */
/* Uses only PCI config accesses to find out whether PME generation is
 * supported, for use before sb_attach is done.
 */
bool pcicore_pmecap_fast(void *pch)
{
	pcicore_info_t *pi = (pcicore_info_t *) pch;
	u8 cap_ptr;
	u32 pmecap;

	cap_ptr =
	    pcicore_find_pci_capability(pi->dev, PCI_CAP_POWERMGMTCAP_ID, NULL,
					NULL);

	if (!cap_ptr)
		return false;

	pci_read_config_dword(pi->dev, cap_ptr, &pmecap);

	return (pmecap & PME_CAP_PM_STATES) != 0;
}

/* return true if the PM capability exists in the pci config space
 * Uses and caches the information in the core handle
 */
static bool pcicore_pmecap(pcicore_info_t *pi)
{
	u8 cap_ptr;
	u32 pmecap;

	if (!pi->pmecap_offset) {
		cap_ptr =
		    pcicore_find_pci_capability(pi->dev,
						PCI_CAP_POWERMGMTCAP_ID, NULL,
						NULL);
		if (!cap_ptr)
			return false;

		pi->pmecap_offset = cap_ptr;

		pci_read_config_dword(pi->dev, pi->pmecap_offset,
					&pmecap);

		/* At least one state can generate PME */
		pi->pmecap = (pmecap & PME_CAP_PM_STATES) != 0;
	}

	return pi->pmecap;
}

/* Enable PME generation */
void pcicore_pmeen(void *pch)
{
	pcicore_info_t *pi = (pcicore_info_t *) pch;
	u32 w;

	/* if not pmecapable return */
	if (!pcicore_pmecap(pi))
		return;

	pci_read_config_dword(pi->dev, pi->pmecap_offset + PME_CSR_OFFSET,
				&w);
	w |= (PME_CSR_PME_EN);
	pci_write_config_dword(pi->dev,
				pi->pmecap_offset + PME_CSR_OFFSET, w);
}

/*
 * Return true if PME status set
 */
bool pcicore_pmestat(void *pch)
{
	pcicore_info_t *pi = (pcicore_info_t *) pch;
	u32 w;

	if (!pcicore_pmecap(pi))
		return false;

	pci_read_config_dword(pi->dev, pi->pmecap_offset + PME_CSR_OFFSET,
				&w);

	return (w & PME_CSR_PME_STAT) == PME_CSR_PME_STAT;
}

/* Disable PME generation, clear the PME status bit if set
 */
void pcicore_pmeclr(void *pch)
{
	pcicore_info_t *pi = (pcicore_info_t *) pch;
	u32 w;

	if (!pcicore_pmecap(pi))
		return;

	pci_read_config_dword(pi->dev, pi->pmecap_offset + PME_CSR_OFFSET,
				&w);

	PCI_ERROR(("pcicore_pci_pmeclr PMECSR : 0x%x\n", w));

	/* PMESTAT is cleared by writing 1 to it */
	w &= ~(PME_CSR_PME_EN);

	pci_write_config_dword(pi->dev,
				pi->pmecap_offset + PME_CSR_OFFSET, w);
}

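/* Read-modify access to the PCIE Link Control register in config space: if
 * mask is non-zero the register is written with val first; the current value
 * is always returned.
 */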
u32 pcie_lcreg(void *pch, u32 mask, u32 val)
{
	pcicore_info_t *pi = (pcicore_info_t *) pch;
	u8 offset;
	u32 tmpval;

	offset = pi->pciecap_lcreg_offset;
	if (!offset)
		return 0;

	/* set operation */
	if (mask)
		pci_write_config_dword(pi->dev, offset, val);

	pci_read_config_dword(pi->dev, offset, &tmpval);
	return tmpval;
}

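/* Indirect access to a PCIE core register (diagnostic helper): optionally
 * write val when mask is set, then read the register back, except for
 * PCIE_DLLP_PCIE11 (0x154) on rev <= 5 cores, which must not be read.
 */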
u32
pcicore_pciereg(void *pch, u32 offset, u32 mask, u32 val, uint type)
{
	u32 reg_val = 0;
	pcicore_info_t *pi = (pcicore_info_t *) pch;
	sbpcieregs_t *pcieregs = pi->regs.pcieregs;

	if (mask) {
		PCI_ERROR(("PCIEREG: 0x%x writeval  0x%x\n", offset, val));
		pcie_writereg(pcieregs, type, offset, val);
	}

	/* Should not read register 0x154 */
	if (pi->sih->buscorerev <= 5 && offset == PCIE_DLLP_PCIE11
	    && type == PCIE_PCIEREGS)
		return reg_val;

	reg_val = pcie_readreg(pcieregs, type, offset);
	PCI_ERROR(("PCIEREG: 0x%x readval is 0x%x\n", offset, reg_val));

	return reg_val;
}

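/* Indirect access to a serdes register over MDIO (diagnostic helper):
 * optionally write val when mask is set, then read the register back;
 * returns 0xFFFFFFFF if the MDIO read fails.
 */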
u32
pcicore_pcieserdesreg(void *pch, u32 mdioslave, u32 offset, u32 mask,
		      u32 val)
{
	u32 reg_val = 0;
	pcicore_info_t *pi = (pcicore_info_t *) pch;

	if (mask) {
		PCI_ERROR(("PCIEMDIOREG: 0x%x writeval  0x%x\n", offset, val));
		pcie_mdiowrite(pi, mdioslave, offset, val);
	}

	if (pcie_mdioread(pi, mdioslave, offset, &reg_val))
		reg_val = 0xFFFFFFFF;
	PCI_ERROR(("PCIEMDIOREG: dev 0x%x offset 0x%x read 0x%x\n", mdioslave,
		   offset, reg_val));

	return reg_val;
}