/*
 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
 *
 *  This file is free software: you may copy, redistribute and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation, either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  This file is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/mdio.h>
#include "reg.h"
#include "hw.h"

static inline bool alx_is_rev_a(u8 rev)
{
	return rev == ALX_REV_A0 || rev == ALX_REV_A1;
}

static int alx_wait_mdio_idle(struct alx_hw *hw)
{
	u32 val;
	int i;

	for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
		val = alx_read_mem32(hw, ALX_MDIO);
		if (!(val & ALX_MDIO_BUSY))
			return 0;
		udelay(10);
	}

	return -ETIMEDOUT;
}

static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
			     u16 reg, u16 *phy_data)
{
	u32 val, clk_sel;
	int err;

	*phy_data = 0;

	/* use the slow clock while the PHY is in hibernation (no link) */
	clk_sel = hw->link_speed != SPEED_UNKNOWN ?
			ALX_MDIO_CLK_SEL_25MD4 :
			ALX_MDIO_CLK_SEL_25MD128;

	if (ext) {
		val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
		      reg << ALX_MDIO_EXTN_REG_SHIFT;
		alx_write_mem32(hw, ALX_MDIO_EXTN, val);

		val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
		      ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
	} else {
		val = ALX_MDIO_SPRES_PRMBL |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
		      reg << ALX_MDIO_REG_SHIFT |
		      ALX_MDIO_START | ALX_MDIO_OP_READ;
	}
	alx_write_mem32(hw, ALX_MDIO, val);

	err = alx_wait_mdio_idle(hw);
	if (err)
		return err;
	val = alx_read_mem32(hw, ALX_MDIO);
	*phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);
	return 0;
}
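
/*
 * Illustrative note (added for clarity, not from the original sources): the
 * PHY access core routines (alx_read_phy_core() above, alx_write_phy_core()
 * below) build a single command word for the MAC's MDIO controller.  "ext"
 * selects the extended device/register addressing written via ALX_MDIO_EXTN,
 * akin to clause-45 MMD access, while the plain form addresses the internal
 * PHY registers directly.  A minimal usage sketch through the locked wrapper
 * defined further down:
 *
 *	u16 bmsr;
 *	int err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
 *	if (err)
 *		return err;
 */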

static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
			      u16 reg, u16 phy_data)
{
	u32 val, clk_sel;

	/* use the slow clock while the PHY is in hibernation (no link) */
	clk_sel = hw->link_speed != SPEED_UNKNOWN ?
			ALX_MDIO_CLK_SEL_25MD4 :
			ALX_MDIO_CLK_SEL_25MD128;

	if (ext) {
		val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
		      reg << ALX_MDIO_EXTN_REG_SHIFT;
		alx_write_mem32(hw, ALX_MDIO_EXTN, val);

		val = ALX_MDIO_SPRES_PRMBL |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
		      phy_data << ALX_MDIO_DATA_SHIFT |
		      ALX_MDIO_START | ALX_MDIO_MODE_EXT;
	} else {
		val = ALX_MDIO_SPRES_PRMBL |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
		      reg << ALX_MDIO_REG_SHIFT |
		      phy_data << ALX_MDIO_DATA_SHIFT |
		      ALX_MDIO_START;
	}
	alx_write_mem32(hw, ALX_MDIO, val);

	return alx_wait_mdio_idle(hw);
}

static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
{
	return alx_read_phy_core(hw, false, 0, reg, phy_data);
}

static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
{
	return alx_write_phy_core(hw, false, 0, reg, phy_data);
}

static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
{
	return alx_read_phy_core(hw, true, dev, reg, pdata);
}

static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
{
	return alx_write_phy_core(hw, true, dev, reg, data);
}

static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
{
	int err;

	err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
	if (err)
		return err;

	return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
}

static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
{
	int err;

	err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
	if (err)
		return err;

	return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
}
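
/*
 * Descriptive note (added for clarity): the __alx_*_phy_dbg() helpers above
 * reach the PHY debug registers indirectly: the debug register index is first
 * written to ALX_MII_DBG_ADDR and the value is then transferred through
 * ALX_MII_DBG_DATA.  For example, alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS,
 * ALX_LEGCYPS_DEF), as used in alx_reset_phy() below, expands to two plain
 * PHY register writes performed under a single holding of mdio_lock.
 */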

int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_read_phy_reg(hw, reg, phy_data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_write_phy_reg(hw, reg, phy_data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_read_phy_ext(hw, dev, reg, pdata);
	spin_unlock(&hw->mdio_lock);

	return err;
}

int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_write_phy_ext(hw, dev, reg, data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_read_phy_dbg(hw, reg, pdata);
	spin_unlock(&hw->mdio_lock);

	return err;
}

static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_write_phy_dbg(hw, reg, data);
	spin_unlock(&hw->mdio_lock);

	return err;
}
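
/*
 * Descriptive note (added for clarity): the double-underscore helpers assume
 * the caller already holds hw->mdio_lock; the wrappers above are the public
 * entry points and take the lock for exactly one MDIO transaction (or one
 * ADDR/DATA pair for the dbg variants).
 */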

static u16 alx_get_phy_config(struct alx_hw *hw)
{
	u32 val;
	u16 phy_val;

	val = alx_read_mem32(hw, ALX_PHY_CTRL);
	/* phy in reset */
	if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
		return ALX_DRV_PHY_UNKNOWN;

	val = alx_read_mem32(hw, ALX_DRV);
	val = ALX_GET_FIELD(val, ALX_DRV_PHY);
	if (ALX_DRV_PHY_UNKNOWN == val)
		return ALX_DRV_PHY_UNKNOWN;

	alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
	if (ALX_PHY_INITED == phy_val)
		return val;

	return ALX_DRV_PHY_UNKNOWN;
}

static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
{
	u32 read;
	int i;

	for (i = 0; i < ALX_SLD_MAX_TO; i++) {
		read = alx_read_mem32(hw, reg);
		if ((read & wait) == 0) {
			if (val)
				*val = read;
			return true;
		}
		mdelay(1);
	}

	return false;
}

static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
{
	u32 mac0, mac1;

	mac0 = alx_read_mem32(hw, ALX_STAD0);
	mac1 = alx_read_mem32(hw, ALX_STAD1);

	/* addr should be big-endian */
	put_unaligned(cpu_to_be32(mac0), (__be32 *)(addr + 2));
	put_unaligned(cpu_to_be16(mac1), (__be16 *)addr);

	return is_valid_ether_addr(addr);
}
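
/*
 * Worked example (added for clarity): with the station address
 * 00-0B-6A-F6-00-DC, the hardware holds STAD0=0x6AF600DC and STAD1=0x000B
 * (see the comment in alx_set_macaddr() below), so the two big-endian stores
 * above reassemble addr[] as 00 0B 6A F6 00 DC.
 */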

int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
{
	u32 val;

	/* try to get it from register first */
	if (alx_read_macaddr(hw, addr))
		return 0;

	/* try to load from efuse */
	if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
		return -EIO;
	alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
	if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
		return -EIO;
	if (alx_read_macaddr(hw, addr))
		return 0;

	/* try to load from flash/eeprom (if present) */
	val = alx_read_mem32(hw, ALX_EFLD);
	if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
		if (!alx_wait_reg(hw, ALX_EFLD,
				  ALX_EFLD_STAT | ALX_EFLD_START, &val))
			return -EIO;
		alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
		if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
			return -EIO;
		if (alx_read_macaddr(hw, addr))
			return 0;
	}

	return -EIO;
}

void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
{
	u32 val;

	/* for example: 00-0B-6A-F6-00-DC maps to STAD0=6AF600DC, STAD1=000B */
	val = be32_to_cpu(get_unaligned((__be32 *)(addr + 2)));
	alx_write_mem32(hw, ALX_STAD0, val);
	val = be16_to_cpu(get_unaligned((__be16 *)addr));
	alx_write_mem32(hw, ALX_STAD1, val);
}

static void alx_reset_osc(struct alx_hw *hw, u8 rev)
{
	u32 val, val2;

	/* clear internal OSC settings; let the hw switch the OSC on its own */
	val = alx_read_mem32(hw, ALX_MISC3);
	alx_write_mem32(hw, ALX_MISC3,
			(val & ~ALX_MISC3_25M_BY_SW) |
			ALX_MISC3_25M_NOTO_INTNL);

	/* The 25M clock from the chipset may be unstable for 1s after PERST is
	 * de-asserted, so the driver needs to re-calibrate it before entering
	 * sleep for WoL.
	 */
	val = alx_read_mem32(hw, ALX_MISC);
	if (rev >= ALX_REV_B0) {
		/* restore the over-current protection default value;
		 * it may have been reset by MAC-RST
		 */
		ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
		/* a 0->1 transition updates the internal OSC value */
		val &= ~ALX_MISC_INTNLOSC_OPEN;
		alx_write_mem32(hw, ALX_MISC, val);
		alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
		/* hw automatically disables the OSC after calibration */
		val2 = alx_read_mem32(hw, ALX_MSIC2);
		val2 &= ~ALX_MSIC2_CALB_START;
		alx_write_mem32(hw, ALX_MSIC2, val2);
		alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
	} else {
		val &= ~ALX_MISC_INTNLOSC_OPEN;
		/* disable isolation for rev A devices */
		if (alx_is_rev_a(rev))
			val &= ~ALX_MISC_ISO_EN;

		alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
		alx_write_mem32(hw, ALX_MISC, val);
	}

	udelay(20);
}

static int alx_stop_mac(struct alx_hw *hw)
{
	u32 rxq, txq, val;
	u16 i;

	rxq = alx_read_mem32(hw, ALX_RXQ0);
	alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
	txq = alx_read_mem32(hw, ALX_TXQ0);
	alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);

	udelay(40);

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);

	for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
		val = alx_read_mem32(hw, ALX_MAC_STS);
		if (!(val & ALX_MAC_STS_IDLE))
			return 0;
		udelay(10);
	}

	return -ETIMEDOUT;
}
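
/*
 * Descriptive note (added for clarity): alx_stop_mac() disables the RX/TX
 * queue and MAC enables, then polls ALX_MAC_STS until the bits covered by
 * ALX_MAC_STS_IDLE are clear.  The poll is bounded by ALX_DMA_MAC_RST_TO
 * iterations of 10us each, so a stuck MAC yields -ETIMEDOUT instead of an
 * unbounded busy-wait.
 */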

int alx_reset_mac(struct alx_hw *hw)
{
	u32 val, pmctrl;
	int i, ret;
	u8 rev;
	bool a_cr;

	pmctrl = 0;
	rev = alx_hw_revision(hw);
	a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);

	/* disable all interrupts, RXQ/TXQ */
	alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);

	ret = alx_stop_mac(hw);
	if (ret)
		return ret;

	/* mac reset workaround */
	alx_write_mem32(hw, ALX_RFD_PIDX, 1);

	/* disable L0s/L1 before mac reset */
	if (a_cr) {
		pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
		if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
			alx_write_mem32(hw, ALX_PMCTRL,
					pmctrl & ~(ALX_PMCTRL_L1_EN |
						   ALX_PMCTRL_L0S_EN));
	}

	/* reset whole mac safely */
	val = alx_read_mem32(hw, ALX_MASTER);
	alx_write_mem32(hw, ALX_MASTER,
			val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);

	/* make sure it's really idle */
	udelay(10);
	for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
		val = alx_read_mem32(hw, ALX_RFD_PIDX);
		if (val == 0)
			break;
		udelay(10);
	}
	for (; i < ALX_DMA_MAC_RST_TO; i++) {
		val = alx_read_mem32(hw, ALX_MASTER);
		if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
			break;
		udelay(10);
	}
	if (i == ALX_DMA_MAC_RST_TO)
		return -EIO;
	udelay(10);

	if (a_cr) {
		alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
		/* restore l0s / l1 */
		if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
			alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
	}

	alx_reset_osc(hw, rev);

	/* clear internal OSC settings, let the hw switch the OSC on its own,
	 * and disable isolation for rev A devices
	 */
	val = alx_read_mem32(hw, ALX_MISC3);
	alx_write_mem32(hw, ALX_MISC3,
			(val & ~ALX_MISC3_25M_BY_SW) |
			ALX_MISC3_25M_NOTO_INTNL);
	val = alx_read_mem32(hw, ALX_MISC);
	val &= ~ALX_MISC_INTNLOSC_OPEN;
	if (alx_is_rev_a(rev))
		val &= ~ALX_MISC_ISO_EN;
	alx_write_mem32(hw, ALX_MISC, val);
	udelay(20);

	/* driver controls speed/duplex and hash-alg */
	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);

	val = alx_read_mem32(hw, ALX_SERDES);
	alx_write_mem32(hw, ALX_SERDES,
			val | ALX_SERDES_MACCLK_SLWDWN |
			ALX_SERDES_PHYCLK_SLWDWN);

	return 0;
}

void alx_reset_phy(struct alx_hw *hw)
{
	int i;
	u32 val;
	u16 phy_val;

	/* (DSP)reset PHY core */
	val = alx_read_mem32(hw, ALX_PHY_CTRL);
	val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
		 ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
		 ALX_PHY_CTRL_CLS);
	val |= ALX_PHY_CTRL_RST_ANALOG;

	val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
	alx_write_mem32(hw, ALX_PHY_CTRL, val);
	udelay(10);
	alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);

	for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
		udelay(10);

	/* phy power saving & hibernation */
	alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
			  ALX_SYSMODCTRL_IECHOADJ_DEF);
	alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
			  ALX_VDRVBIAS_DEF);

	/* EEE advertisement */
	val = alx_read_mem32(hw, ALX_LPI_CTRL);
	alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);

	/* phy power saving */
	alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
	alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
	alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
			  phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
	/* rtl8139c, 120m issue */
	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
			  ALX_MIIEXT_NLP78_120M_DEF);
	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
			  ALX_MIIEXT_S3DIG10_DEF);

	if (hw->lnk_patch) {
		/* Turn off half amplitude */
		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
				 &phy_val);
		alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
				  phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
		/* Turn off Green feature */
		alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
		alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
				  phy_val | ALX_GREENCFG2_BP_GREEN);
		/* Turn off half Bias */
		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
				 &phy_val);
		alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
				  phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
	}

	/* set phy interrupt mask */
	alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
}

#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

void alx_reset_pcie(struct alx_hw *hw)
{
	u8 rev = alx_hw_revision(hw);
	u32 val;
	u16 val16;

	/* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
	pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
	if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
		val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
	}

	/* clear WoL setting/status */
	val = alx_read_mem32(hw, ALX_WOL0);
	alx_write_mem32(hw, ALX_WOL0, 0);

	val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
	alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);

	/* mask some pcie error bits */
	val = alx_read_mem32(hw, ALX_UE_SVRT);
	val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
	alx_write_mem32(hw, ALX_UE_SVRT, val);

	/* wol 25M & pclk */
	val = alx_read_mem32(hw, ALX_MASTER);
	if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
		if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
		    (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
			alx_write_mem32(hw, ALX_MASTER,
					val | ALX_MASTER_PCLKSEL_SRDS |
					ALX_MASTER_WAKEN_25M);
	} else {
		if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
		    (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
			alx_write_mem32(hw, ALX_MASTER,
					(val & ~ALX_MASTER_PCLKSEL_SRDS) |
					ALX_MASTER_WAKEN_25M);
	}

	/* ASPM setting */
	alx_enable_aspm(hw, true, true);

	udelay(10);
}

void alx_start_mac(struct alx_hw *hw)
{
	u32 mac, txq, rxq;

	rxq = alx_read_mem32(hw, ALX_RXQ0);
	alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
	txq = alx_read_mem32(hw, ALX_TXQ0);
	alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);

	mac = hw->rx_ctrl;
	if (hw->duplex == DUPLEX_FULL)
		mac |= ALX_MAC_CTRL_FULLD;
	else
		mac &= ~ALX_MAC_CTRL_FULLD;
	ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
		      hw->link_speed == SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
						     ALX_MAC_CTRL_SPEED_10_100);
	mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
	hw->rx_ctrl = mac;
	alx_write_mem32(hw, ALX_MAC_CTRL, mac);
}

void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
{
	if (fc & ALX_FC_RX)
		hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
	else
		hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;

	if (fc & ALX_FC_TX)
		hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
	else
		hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
{
	u32 pmctrl;
	u8 rev = alx_hw_revision(hw);

	pmctrl = alx_read_mem32(hw, ALX_PMCTRL);

	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
		      ALX_PMCTRL_LCKDET_TIMER_DEF);
	pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
		  ALX_PMCTRL_L1_CLKSW_EN |
		  ALX_PMCTRL_L1_SRDSRX_PWD;
	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
	pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
		    ALX_PMCTRL_L1_SRDSPLL_EN |
		    ALX_PMCTRL_L1_BUFSRX_EN |
		    ALX_PMCTRL_SADLY_EN |
		    ALX_PMCTRL_HOTRST_WTEN |
		    ALX_PMCTRL_L0S_EN |
		    ALX_PMCTRL_L1_EN |
		    ALX_PMCTRL_ASPM_FCEN |
		    ALX_PMCTRL_TXL1_AFTER_L0S |
		    ALX_PMCTRL_RXL1_AFTER_L0S);
	if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
		pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;

	if (l0s_en)
		pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
	if (l1_en)
		pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);

	alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
}


static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
{
	u32 cfg = 0;

	if (ethadv_cfg & ADVERTISED_Autoneg) {
		cfg |= ALX_DRV_PHY_AUTO;
		if (ethadv_cfg & ADVERTISED_10baseT_Half)
			cfg |= ALX_DRV_PHY_10;
		if (ethadv_cfg & ADVERTISED_10baseT_Full)
			cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
		if (ethadv_cfg & ADVERTISED_100baseT_Half)
			cfg |= ALX_DRV_PHY_100;
		if (ethadv_cfg & ADVERTISED_100baseT_Full)
			cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
		if (ethadv_cfg & ADVERTISED_1000baseT_Half)
			cfg |= ALX_DRV_PHY_1000;
		if (ethadv_cfg & ADVERTISED_1000baseT_Full)
			cfg |= ALX_DRV_PHY_1000 | ALX_DRV_PHY_DUPLEX;
		if (ethadv_cfg & ADVERTISED_Pause)
			cfg |= ADVERTISE_PAUSE_CAP;
		if (ethadv_cfg & ADVERTISED_Asym_Pause)
			cfg |= ADVERTISE_PAUSE_ASYM;
	} else {
		switch (ethadv_cfg) {
		case ADVERTISED_10baseT_Half:
			cfg |= ALX_DRV_PHY_10;
			break;
		case ADVERTISED_100baseT_Half:
			cfg |= ALX_DRV_PHY_100;
			break;
		case ADVERTISED_10baseT_Full:
			cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
			break;
		case ADVERTISED_100baseT_Full:
			cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
			break;
		}
	}

	return cfg;
}
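
/*
 * Worked example (added for clarity): ethadv_to_hw_cfg(hw, ADVERTISED_Autoneg |
 * ADVERTISED_100baseT_Full | ADVERTISED_Pause) returns ALX_DRV_PHY_AUTO |
 * ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX | ADVERTISE_PAUSE_CAP.  In this file
 * the value is recorded in the ALX_DRV register by alx_setup_speed_duplex()
 * and compared again in alx_phy_configured().
 */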

int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
{
	u16 adv, giga, cr;
	u32 val;
	int err = 0;

	alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
	val = alx_read_mem32(hw, ALX_DRV);
	ALX_SET_FIELD(val, ALX_DRV_PHY, 0);

	if (ethadv & ADVERTISED_Autoneg) {
		adv = ADVERTISE_CSMA;
		adv |= ethtool_adv_to_mii_adv_t(ethadv);

		if (flowctrl & ALX_FC_ANEG) {
			if (flowctrl & ALX_FC_RX) {
				adv |= ADVERTISE_PAUSE_CAP;
				if (!(flowctrl & ALX_FC_TX))
					adv |= ADVERTISE_PAUSE_ASYM;
			} else if (flowctrl & ALX_FC_TX) {
				adv |= ADVERTISE_PAUSE_ASYM;
			}
		}
		giga = 0;
		if (alx_hw_giga(hw))
			giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);

		cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;

		if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
		    alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
		    alx_write_phy_reg(hw, MII_BMCR, cr))
			err = -EBUSY;
	} else {
		cr = BMCR_RESET;
		if (ethadv == ADVERTISED_100baseT_Half ||
		    ethadv == ADVERTISED_100baseT_Full)
			cr |= BMCR_SPEED100;
		if (ethadv == ADVERTISED_10baseT_Full ||
		    ethadv == ADVERTISED_100baseT_Full)
			cr |= BMCR_FULLDPLX;

		err = alx_write_phy_reg(hw, MII_BMCR, cr);
	}

	if (!err) {
		alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
		val |= ethadv_to_hw_cfg(hw, ethadv);
	}

	alx_write_mem32(hw, ALX_DRV, val);

	return err;
}
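
/*
 * Usage sketch (illustrative only, assuming hw->adv_cfg and hw->flowctrl hold
 * the driver's current advertisement and flow-control settings):
 *
 *	err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
 *
 * On success the PHY is restarted with the requested mode, ALX_MII_DBG_ADDR is
 * stamped with ALX_PHY_INITED and ALX_DRV records the ethadv_to_hw_cfg() bits,
 * which alx_phy_configured() later uses to detect a still-valid configuration.
 */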


void alx_post_phy_link(struct alx_hw *hw)
{
	u16 phy_val, len, agc;
	u8 revid = alx_hw_revision(hw);
	bool adj_th = revid == ALX_REV_B0;

	if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
		return;

	/* 1000BT/AZ, wrong cable length */
	if (hw->link_speed != SPEED_UNKNOWN) {
		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
				 &phy_val);
		len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
		alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
		agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);

		if ((hw->link_speed == SPEED_1000 &&
		     (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
		      (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
		    (hw->link_speed == SPEED_100 &&
		     (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
		      (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
			alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
					  ALX_AZ_ANADECT_LONG);
			alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					 &phy_val);
			alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					  phy_val | ALX_AFE_10BT_100M_TH);
		} else {
			alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
					  ALX_AZ_ANADECT_DEF);
			alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
					 ALX_MIIEXT_AFE, &phy_val);
			alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					  phy_val & ~ALX_AFE_10BT_100M_TH);
		}

		/* threshold adjust */
		if (adj_th && hw->lnk_patch) {
			if (hw->link_speed == SPEED_100) {
				alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
						  ALX_MSE16DB_UP);
			} else if (hw->link_speed == SPEED_1000) {
				/*
				 * Gigabit link threshold: raise the noise
				 * tolerance by 50%.
				 */
				alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
						 &phy_val);
				ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
					      ALX_MSE20DB_TH_HI);
				alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
						  phy_val);
			}
		}
	} else {
		alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
				 &phy_val);
		alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
				  phy_val & ~ALX_AFE_10BT_100M_TH);

		if (adj_th && hw->lnk_patch) {
			alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
					  ALX_MSE16DB_DOWN);
			alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
			ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
				      ALX_MSE20DB_TH_DEF);
			alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
		}
	}
}

bool alx_phy_configured(struct alx_hw *hw)
{
	u32 cfg, hw_cfg;

	cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
	cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
	hw_cfg = alx_get_phy_config(hw);

	if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
		return false;

	return cfg == hw_cfg;
}

int alx_read_phy_link(struct alx_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u16 bmsr, giga;
	int err;

	err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
	if (err)
		return err;

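	/* Note (added for clarity): the BMSR link-status bit is latched low,
	 * so it is read twice here; the second read reflects the current
	 * link state.
	 */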
	err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
	if (err)
		return err;

	if (!(bmsr & BMSR_LSTATUS)) {
		hw->link_speed = SPEED_UNKNOWN;
		hw->duplex = DUPLEX_UNKNOWN;
		return 0;
	}

	/* speed/duplex result is saved in PHY Specific Status Register */
	err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
	if (err)
		return err;

	if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
		goto wrong_speed;

	switch (giga & ALX_GIGA_PSSR_SPEED) {
	case ALX_GIGA_PSSR_1000MBS:
		hw->link_speed = SPEED_1000;
		break;
	case ALX_GIGA_PSSR_100MBS:
		hw->link_speed = SPEED_100;
		break;
	case ALX_GIGA_PSSR_10MBS:
		hw->link_speed = SPEED_10;
		break;
	default:
		goto wrong_speed;
	}

	hw->duplex = (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;
	return 0;

wrong_speed:
	dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
	return -EINVAL;
}

int alx_clear_phy_intr(struct alx_hw *hw)
{
	u16 isr;

	/* clear interrupt status by reading it */
	return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
}

void alx_disable_rss(struct alx_hw *hw)
{
	u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);

	ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
	alx_write_mem32(hw, ALX_RXQ0, ctrl);
}

void alx_configure_basic(struct alx_hw *hw)
{
	u32 val, raw_mtu, max_payload;
	u16 val16;
	u8 chip_rev = alx_hw_revision(hw);

	alx_set_macaddr(hw, hw->mac_addr);

	alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);

	/* idle timeout to switch clk_125M */
	if (chip_rev >= ALX_REV_B0)
		alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
				ALX_IDLE_DECISN_TIMER_DEF);

	alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);

	val = alx_read_mem32(hw, ALX_MASTER);
	val |= ALX_MASTER_IRQMOD2_EN |
	       ALX_MASTER_IRQMOD1_EN |
	       ALX_MASTER_SYSALVTIMER_EN;
	alx_write_mem32(hw, ALX_MASTER, val);
	alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
			(hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
	/* interrupt re-trigger timeout */
	alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
	/* tpd threshold to trigger an interrupt */
	alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
	alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);

	raw_mtu = ALX_RAW_MTU(hw->mtu);
	alx_write_mem32(hw, ALX_MTU, raw_mtu);
	if (raw_mtu > (ALX_MTU_JUMBO_TH + ETH_FCS_LEN + VLAN_HLEN))
		hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;

	if (raw_mtu < ALX_TXQ1_JUMBO_TSO_TH)
		val = (raw_mtu + 7) >> 3;
	else
		val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
	alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);

	max_payload = pcie_get_readrq(hw->pdev) >> 8;
	/*
	 * if the BIOS has changed the default DMA read max length,
	 * restore it to the default value
	 */
	if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
		pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);

	val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
	      ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
	      ALX_TXQ0_SUPT_IPOPT |
	      ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
	alx_write_mem32(hw, ALX_TXQ0, val);
	val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
	      ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
	      ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
	      ALX_HQTPD_BURST_EN;
	alx_write_mem32(hw, ALX_HQTPD, val);

	/* rxq, flow control */
	val = alx_read_mem32(hw, ALX_SRAM5);
	val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
	if (val > ALX_SRAM_RXF_LEN_8K) {
		val16 = ALX_MTU_STD_ALGN >> 3;
		val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
	} else {
		val16 = ALX_MTU_STD_ALGN >> 3;
		val = (val - ALX_MTU_STD_ALGN) >> 3;
	}
	alx_write_mem32(hw, ALX_RXQ2,
			val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
			val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);
	val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
	      ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
	      ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
	      ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
	      ALX_RXQ0_IPV6_PARSE_EN;

	if (alx_hw_giga(hw))
		ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
			      ALX_RXQ0_ASPM_THRESH_100M);

	alx_write_mem32(hw, ALX_RXQ0, val);

	val = alx_read_mem32(hw, ALX_DMA);
	val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
	      ALX_DMA_RREQ_PRI_DATA |
	      max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
	      ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
	      ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
	      (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
	alx_write_mem32(hw, ALX_DMA, val);

	/* default multi-tx-q weights */
	val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
	      4 << ALX_WRR_PRI0_SHIFT |
	      4 << ALX_WRR_PRI1_SHIFT |
	      4 << ALX_WRR_PRI2_SHIFT |
	      4 << ALX_WRR_PRI3_SHIFT;
	alx_write_mem32(hw, ALX_WRR, val);
}

void alx_mask_msix(struct alx_hw *hw, int index, bool mask)
{
	u32 reg, val;

	reg = ALX_MSIX_ENTRY_BASE + index * PCI_MSIX_ENTRY_SIZE +
		PCI_MSIX_ENTRY_VECTOR_CTRL;

	val = mask ? PCI_MSIX_ENTRY_CTRL_MASKBIT : 0;

	alx_write_mem32(hw, reg, val);
	alx_post_write(hw);
}
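
/*
 * Descriptive note (added for clarity): each MSI-X table entry is
 * PCI_MSIX_ENTRY_SIZE (16) bytes with the vector-control dword at offset
 * PCI_MSIX_ENTRY_VECTOR_CTRL (12), so e.g. index 2 touches
 * ALX_MSIX_ENTRY_BASE + 44.  alx_post_write() is used to flush the posted
 * write so the mask change takes effect before the caller proceeds.
 */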


bool alx_get_phy_info(struct alx_hw *hw)
{
	u16 devs1, devs2;

	if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
	    alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
		return false;

	/* since we don't have a PMA/PMD status2 register, we can't use the
	 * mdio45_probe function for prtad and mmds.
	 * use fixed MMD3 to get the mmds.
	 */
	if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
	    alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
		return false;
	hw->mdio.mmds = devs1 | devs2 << 16;

	return true;
}
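
/*
 * Descriptive note (added for clarity): MDIO_DEVS1/MDIO_DEVS2 are the
 * clause-45 "devices in package" registers; DEVS1 supplies bits 15:0 and
 * DEVS2 bits 31:16 of the device bitmap, which is why they are combined
 * above as devs1 | devs2 << 16 before being stored in hw->mdio.mmds.
 */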

void alx_update_hw_stats(struct alx_hw *hw)
{
	/* RX stats */
	hw->stats.rx_ok          += alx_read_mem32(hw, ALX_MIB_RX_OK);
	hw->stats.rx_bcast       += alx_read_mem32(hw, ALX_MIB_RX_BCAST);
	hw->stats.rx_mcast       += alx_read_mem32(hw, ALX_MIB_RX_MCAST);
	hw->stats.rx_pause       += alx_read_mem32(hw, ALX_MIB_RX_PAUSE);
	hw->stats.rx_ctrl        += alx_read_mem32(hw, ALX_MIB_RX_CTRL);
	hw->stats.rx_fcs_err     += alx_read_mem32(hw, ALX_MIB_RX_FCS_ERR);
	hw->stats.rx_len_err     += alx_read_mem32(hw, ALX_MIB_RX_LEN_ERR);
	hw->stats.rx_byte_cnt    += alx_read_mem32(hw, ALX_MIB_RX_BYTE_CNT);
	hw->stats.rx_runt        += alx_read_mem32(hw, ALX_MIB_RX_RUNT);
	hw->stats.rx_frag        += alx_read_mem32(hw, ALX_MIB_RX_FRAG);
	hw->stats.rx_sz_64B      += alx_read_mem32(hw, ALX_MIB_RX_SZ_64B);
	hw->stats.rx_sz_127B     += alx_read_mem32(hw, ALX_MIB_RX_SZ_127B);
	hw->stats.rx_sz_255B     += alx_read_mem32(hw, ALX_MIB_RX_SZ_255B);
	hw->stats.rx_sz_511B     += alx_read_mem32(hw, ALX_MIB_RX_SZ_511B);
	hw->stats.rx_sz_1023B    += alx_read_mem32(hw, ALX_MIB_RX_SZ_1023B);
	hw->stats.rx_sz_1518B    += alx_read_mem32(hw, ALX_MIB_RX_SZ_1518B);
	hw->stats.rx_sz_max      += alx_read_mem32(hw, ALX_MIB_RX_SZ_MAX);
	hw->stats.rx_ov_sz       += alx_read_mem32(hw, ALX_MIB_RX_OV_SZ);
	hw->stats.rx_ov_rxf      += alx_read_mem32(hw, ALX_MIB_RX_OV_RXF);
	hw->stats.rx_ov_rrd      += alx_read_mem32(hw, ALX_MIB_RX_OV_RRD);
	hw->stats.rx_align_err   += alx_read_mem32(hw, ALX_MIB_RX_ALIGN_ERR);
	hw->stats.rx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BCCNT);
	hw->stats.rx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_MCCNT);
	hw->stats.rx_err_addr    += alx_read_mem32(hw, ALX_MIB_RX_ERRADDR);

	/* TX stats */
	hw->stats.tx_ok          += alx_read_mem32(hw, ALX_MIB_TX_OK);
	hw->stats.tx_bcast       += alx_read_mem32(hw, ALX_MIB_TX_BCAST);
	hw->stats.tx_mcast       += alx_read_mem32(hw, ALX_MIB_TX_MCAST);
	hw->stats.tx_pause       += alx_read_mem32(hw, ALX_MIB_TX_PAUSE);
	hw->stats.tx_exc_defer   += alx_read_mem32(hw, ALX_MIB_TX_EXC_DEFER);
	hw->stats.tx_ctrl        += alx_read_mem32(hw, ALX_MIB_TX_CTRL);
	hw->stats.tx_defer       += alx_read_mem32(hw, ALX_MIB_TX_DEFER);
	hw->stats.tx_byte_cnt    += alx_read_mem32(hw, ALX_MIB_TX_BYTE_CNT);
	hw->stats.tx_sz_64B      += alx_read_mem32(hw, ALX_MIB_TX_SZ_64B);
	hw->stats.tx_sz_127B     += alx_read_mem32(hw, ALX_MIB_TX_SZ_127B);
	hw->stats.tx_sz_255B     += alx_read_mem32(hw, ALX_MIB_TX_SZ_255B);
	hw->stats.tx_sz_511B     += alx_read_mem32(hw, ALX_MIB_TX_SZ_511B);
	hw->stats.tx_sz_1023B    += alx_read_mem32(hw, ALX_MIB_TX_SZ_1023B);
	hw->stats.tx_sz_1518B    += alx_read_mem32(hw, ALX_MIB_TX_SZ_1518B);
	hw->stats.tx_sz_max      += alx_read_mem32(hw, ALX_MIB_TX_SZ_MAX);
	hw->stats.tx_single_col  += alx_read_mem32(hw, ALX_MIB_TX_SINGLE_COL);
	hw->stats.tx_multi_col   += alx_read_mem32(hw, ALX_MIB_TX_MULTI_COL);
	hw->stats.tx_late_col    += alx_read_mem32(hw, ALX_MIB_TX_LATE_COL);
	hw->stats.tx_abort_col   += alx_read_mem32(hw, ALX_MIB_TX_ABORT_COL);
	hw->stats.tx_underrun    += alx_read_mem32(hw, ALX_MIB_TX_UNDERRUN);
	hw->stats.tx_trd_eop     += alx_read_mem32(hw, ALX_MIB_TX_TRD_EOP);
	hw->stats.tx_len_err     += alx_read_mem32(hw, ALX_MIB_TX_LEN_ERR);
	hw->stats.tx_trunc       += alx_read_mem32(hw, ALX_MIB_TX_TRUNC);
	hw->stats.tx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BCCNT);
	hw->stats.tx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_MCCNT);

	hw->stats.update         += alx_read_mem32(hw, ALX_MIB_UPDATE);
}