/*
 * Broadcom specific AMBA
 * PCI Core
 *
 * Copyright 2005, 2011, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>

/**************************************************
 * R/W ops.
 **************************************************/

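/* Indirect access to the PCIe core's extended registers: the register
 * offset is written to PCIEIND_ADDR, read back once to make sure the
 * write has reached the core, and the value is then transferred via
 * PCIEIND_DATA.
 */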
u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
}

#if 0
static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}
#endif

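/* Select the SERDES block ("PHY") that subsequent MDIO accesses will
 * address. Core revs >= 10 use a two-level register mapping, so the
 * block address has to be written first via a dedicated MDIO write;
 * the helper then polls MDIO_CONTROL until the access completes.
 */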
static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
{
	u32 v;
	int i;

	v = BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_WRITE;
	v |= (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
	      BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
	v |= (BCMA_CORE_PCI_MDIODATA_BLK_ADDR <<
	      BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	v |= BCMA_CORE_PCI_MDIODATA_TA;
	v |= (phy << 4);
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);

	udelay(10);
	for (i = 0; i < 200; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
			break;
		msleep(1);
	}
}

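/* Read a 16-bit SERDES register through the PCIe core's MDIO interface.
 * On core revs >= 10 the target block is selected first with
 * bcma_pcie_mdio_set_phy(); older revs encode device and register
 * address directly in the MDIO data word.
 */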
static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
{
	int max_retries = 10;
	u16 ret = 0;
	u32 v;
	int i;

	/* enable mdio access to SERDES */
	v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
	v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

	if (pc->core->id.rev >= 10) {
		max_retries = 200;
		bcma_pcie_mdio_set_phy(pc, device);
		v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
		     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	} else {
		v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
	}

	/* OR in the transaction bits so the device/register selection
	 * built above is preserved. */
	v |= BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_READ;
	v |= BCMA_CORE_PCI_MDIODATA_TA;

	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
	/* Wait for the device to complete the transaction */
	udelay(10);
	for (i = 0; i < max_retries; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) {
			udelay(10);
			ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
			break;
		}
		msleep(1);
	}
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
	return ret;
}

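/* Write a 16-bit value to a SERDES register over the MDIO interface,
 * using the same addressing scheme as bcma_pcie_mdio_read().
 */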
static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
				u8 address, u16 data)
{
	int max_retries = 10;
	u32 v;
	int i;

	/* enable mdio access to SERDES */
	v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
	v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

	if (pc->core->id.rev >= 10) {
		max_retries = 200;
		bcma_pcie_mdio_set_phy(pc, device);
		v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
		     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	} else {
		v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
	}

	/* As in the read path, OR in the transaction bits to keep the
	 * device/register selection built above. */
	v |= BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_WRITE;
	v |= BCMA_CORE_PCI_MDIODATA_TA;
	v |= data;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
	/* Wait for the device to complete the transaction */
	udelay(10);
	for (i = 0; i < max_retries; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
			break;
		msleep(1);
	}
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
}

/**************************************************
 * Workarounds.
 **************************************************/

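/* Check the PCIe link PHY status: if it reports an inverted RX
 * polarity, force the SERDES receiver to the inverted setting,
 * otherwise just force the detected (normal) polarity.
 */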
static u8 bcma_pcicore_polarity_workaround(struct bcma_drv_pci *pc)
{
	u32 tmp;

	tmp = bcma_pcie_read(pc, BCMA_CORE_PCI_PLP_STATUSREG);
	if (tmp & BCMA_CORE_PCI_PLP_POLARITYINV_STAT)
		return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE |
		       BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY;
	else
		return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE;
}

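/* SERDES workaround: program the RX polarity according to what the
 * link PHY detected and disable the PLL frequency detection if it is
 * currently enabled.
 */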
static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
{
	u16 tmp;

	bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_RX,
			     BCMA_CORE_PCI_SERDES_RX_CTRL,
			     bcma_pcicore_polarity_workaround(pc));
	tmp = bcma_pcie_mdio_read(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
				  BCMA_CORE_PCI_SERDES_PLL_CTRL);
	if (tmp & BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN)
		bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
				     BCMA_CORE_PCI_SERDES_PLL_CTRL,
				     tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
}

/**************************************************
 * Init.
 **************************************************/

static void __devinit bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
	bcma_pcicore_serdes_workaround(pc);
}

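/* One-time setup of the PCI(e) core. If host controller support is
 * built in and the core is wired as a host bridge, the hostmode code
 * takes over; otherwise the client-mode SERDES workarounds are applied.
 */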
void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
{
	if (pc->setup_done)
		return;

#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
	pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
	if (pc->hostmode)
		bcma_core_pci_hostmode_init(pc);
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */

	if (!pc->hostmode)
		bcma_core_pci_clientmode_init(pc);
}

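/**
 * bcma_core_pci_irq_ctl - route a core's IRQ through the PCI core
 * @pc: PCI core driver
 * @core: bcma core whose interrupt routing is changed
 * @enable: true to route the core's IRQ to the host, false to mask it
 *
 * Sets or clears the core's bit in the BCMA_PCI_IRQMASK config word of
 * the host PCI device. Only meaningful when the bus is hosted over PCI;
 * on other host types this is a no-op. Typically called by the driver
 * of the connected core (e.g. the wireless core driver) before it
 * requests its interrupt.
 */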
int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
			  bool enable)
{
	struct pci_dev *pdev = pc->core->bus->host_pci;
	u32 coremask, tmp;
	int err = 0;

	if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
		/* This bcma device is not on a PCI host bus, so its IRQs
		 * are not routed through the PCI core and there is
		 * nothing to configure here. */
		goto out;
	}

	err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
	if (err)
		goto out;

	coremask = BIT(core->core_index) << 8;
	if (enable)
		tmp |= coremask;
	else
		tmp &= ~coremask;

	err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);

out:
	return err;
}
EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);