1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2016 Broadcom Corporation.
8  * Copyright (C) 2016-2017 Broadcom Limited.
9  * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10  * refers to Broadcom Inc. and/or its subsidiaries.
11  *
12  * Firmware is:
13  *	Derived from proprietary unpublished source code,
14  *	Copyright (C) 2000-2016 Broadcom Corporation.
15  *	Copyright (C) 2016-2017 Broadcom Ltd.
16  *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17  *	refers to Broadcom Inc. and/or its subsidiaries.
18  *
19  *	Permission is hereby granted for the distribution of this firmware
20  *	data in hexadecimal or equivalent format, provided this copyright
21  *	notice is accompanying it.
22  */
23 
24 
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58 
59 #include <net/checksum.h>
60 #include <net/ip.h>
61 
62 #include <linux/io.h>
63 #include <asm/byteorder.h>
64 #include <linux/uaccess.h>
65 
66 #include <uapi/linux/net_tstamp.h>
67 #include <linux/ptp_clock_kernel.h>
68 
69 #define BAR_0	0
70 #define BAR_2	2
71 
72 #include "tg3.h"
73 
74 /* Functions & macros to verify TG3_FLAGS types */
75 
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
77 {
78 	return test_bit(flag, bits);
79 }
80 
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
82 {
83 	set_bit(flag, bits);
84 }
85 
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
87 {
88 	clear_bit(flag, bits);
89 }
90 
91 #define tg3_flag(tp, flag)				\
92 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
93 #define tg3_flag_set(tp, flag)				\
94 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define tg3_flag_clear(tp, flag)			\
96 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
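/* Usage sketch of how the wrappers above expand: tg3_flag(tp, ENABLE_ASF)
 * becomes _tg3_flag(TG3_FLAG_ENABLE_ASF, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the flag bitmap, so flags can be tested and updated without
 * additional locking.
 */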
97 
98 #define DRV_MODULE_NAME		"tg3"
99 /* DO NOT UPDATE TG3_*_NUM defines */
100 #define TG3_MAJ_NUM			3
101 #define TG3_MIN_NUM			137
102 
103 #define RESET_KIND_SHUTDOWN	0
104 #define RESET_KIND_INIT		1
105 #define RESET_KIND_SUSPEND	2
106 
107 #define TG3_DEF_RX_MODE		0
108 #define TG3_DEF_TX_MODE		0
109 #define TG3_DEF_MSG_ENABLE	  \
110 	(NETIF_MSG_DRV		| \
111 	 NETIF_MSG_PROBE	| \
112 	 NETIF_MSG_LINK		| \
113 	 NETIF_MSG_TIMER	| \
114 	 NETIF_MSG_IFDOWN	| \
115 	 NETIF_MSG_IFUP		| \
116 	 NETIF_MSG_RX_ERR	| \
117 	 NETIF_MSG_TX_ERR)
118 
119 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
120 
121 /* length of time before we decide the hardware is borked,
122  * and dev->tx_timeout() should be called to fix the problem
123  */
124 
125 #define TG3_TX_TIMEOUT			(5 * HZ)
126 
127 /* hardware minimum and maximum for a single frame's data payload */
128 #define TG3_MIN_MTU			ETH_ZLEN
129 #define TG3_MAX_MTU(tp)	\
130 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
131 
132 /* These numbers seem to be hard coded in the NIC firmware somehow.
133  * You can't change the ring sizes, but you can change where you place
134  * them in the NIC onboard memory.
135  */
136 #define TG3_RX_STD_RING_SIZE(tp) \
137 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
138 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
139 #define TG3_DEF_RX_RING_PENDING		200
140 #define TG3_RX_JMB_RING_SIZE(tp) \
141 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
142 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
143 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
144 
/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
151 
152 #define TG3_TX_RING_SIZE		512
153 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
154 
155 #define TG3_RX_STD_RING_BYTES(tp) \
156 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
157 #define TG3_RX_JMB_RING_BYTES(tp) \
158 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
159 #define TG3_RX_RCB_RING_BYTES(tp) \
160 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
161 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
162 				 TG3_TX_RING_SIZE)
163 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
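/* Worked example: TG3_TX_RING_SIZE is 512, a power of two, so the compiler
 * can reduce '(N + 1) % TG3_TX_RING_SIZE' to the mask form used above, and
 * NEXT_TX(TG3_TX_RING_SIZE - 1) wraps back to 0 without a hw divide.
 */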
164 
165 #define TG3_DMA_BYTE_ENAB		64
166 
167 #define TG3_RX_STD_DMA_SZ		1536
168 #define TG3_RX_JMB_DMA_SZ		9046
169 
170 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
171 
172 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
173 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
174 
175 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
176 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
177 
178 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
179 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
180 
181 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
182  * that are at least dword aligned when used in PCIX mode.  The driver
183  * works around this bug by double copying the packet.  This workaround
184  * is built into the normal double copy length check for efficiency.
185  *
186  * However, the double copy is only necessary on those architectures
187  * where unaligned memory accesses are inefficient.  For those architectures
188  * where unaligned memory accesses incur little penalty, we can reintegrate
189  * the 5701 in the normal rx path.  Doing so saves a device structure
190  * dereference by hardcoding the double copy threshold in place.
191  */
192 #define TG3_RX_COPY_THRESHOLD		256
193 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
194 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
195 #else
196 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
197 #endif
198 
199 #if (NET_IP_ALIGN != 0)
200 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
201 #else
202 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
203 #endif
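/* Net effect: where unaligned loads are cheap (or NET_IP_ALIGN is 0),
 * TG3_RX_COPY_THRESH(tp) folds to the constant 256 at compile time;
 * otherwise it reads the per-device tp->rx_copy_thresh so the 5701
 * workaround described above can adjust the threshold at runtime.
 */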
204 
205 /* minimum number of free TX descriptors required to wake up TX process */
206 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
207 #define TG3_TX_BD_DMA_MAX_2K		2048
208 #define TG3_TX_BD_DMA_MAX_4K		4096
209 
210 #define TG3_RAW_IP_ALIGN 2
211 
212 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
213 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
214 
215 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
216 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
217 
218 #define FIRMWARE_TG3		"tigon/tg3.bin"
219 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
220 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
221 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
222 
223 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
224 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
225 MODULE_LICENSE("GPL");
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229 
230 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233 
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
236 
237 static const struct pci_device_id tg3_pci_tbl[] = {
238 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 			TG3_DRV_DATA_FLAG_5705_10_100},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 			TG3_DRV_DATA_FLAG_5705_10_100},
262 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 			TG3_DRV_DATA_FLAG_5705_10_100},
266 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 			PCI_VENDOR_ID_LENOVO,
288 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
341 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
342 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
343 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
347 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
348 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
349 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
350 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
351 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
352 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
353 	{}
354 };
355 
356 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
357 
358 static const struct {
359 	const char string[ETH_GSTRING_LEN];
360 } ethtool_stats_keys[] = {
361 	{ "rx_octets" },
362 	{ "rx_fragments" },
363 	{ "rx_ucast_packets" },
364 	{ "rx_mcast_packets" },
365 	{ "rx_bcast_packets" },
366 	{ "rx_fcs_errors" },
367 	{ "rx_align_errors" },
368 	{ "rx_xon_pause_rcvd" },
369 	{ "rx_xoff_pause_rcvd" },
370 	{ "rx_mac_ctrl_rcvd" },
371 	{ "rx_xoff_entered" },
372 	{ "rx_frame_too_long_errors" },
373 	{ "rx_jabbers" },
374 	{ "rx_undersize_packets" },
375 	{ "rx_in_length_errors" },
376 	{ "rx_out_length_errors" },
377 	{ "rx_64_or_less_octet_packets" },
378 	{ "rx_65_to_127_octet_packets" },
379 	{ "rx_128_to_255_octet_packets" },
380 	{ "rx_256_to_511_octet_packets" },
381 	{ "rx_512_to_1023_octet_packets" },
382 	{ "rx_1024_to_1522_octet_packets" },
383 	{ "rx_1523_to_2047_octet_packets" },
384 	{ "rx_2048_to_4095_octet_packets" },
385 	{ "rx_4096_to_8191_octet_packets" },
386 	{ "rx_8192_to_9022_octet_packets" },
387 
388 	{ "tx_octets" },
389 	{ "tx_collisions" },
390 
391 	{ "tx_xon_sent" },
392 	{ "tx_xoff_sent" },
393 	{ "tx_flow_control" },
394 	{ "tx_mac_errors" },
395 	{ "tx_single_collisions" },
396 	{ "tx_mult_collisions" },
397 	{ "tx_deferred" },
398 	{ "tx_excessive_collisions" },
399 	{ "tx_late_collisions" },
400 	{ "tx_collide_2times" },
401 	{ "tx_collide_3times" },
402 	{ "tx_collide_4times" },
403 	{ "tx_collide_5times" },
404 	{ "tx_collide_6times" },
405 	{ "tx_collide_7times" },
406 	{ "tx_collide_8times" },
407 	{ "tx_collide_9times" },
408 	{ "tx_collide_10times" },
409 	{ "tx_collide_11times" },
410 	{ "tx_collide_12times" },
411 	{ "tx_collide_13times" },
412 	{ "tx_collide_14times" },
413 	{ "tx_collide_15times" },
414 	{ "tx_ucast_packets" },
415 	{ "tx_mcast_packets" },
416 	{ "tx_bcast_packets" },
417 	{ "tx_carrier_sense_errors" },
418 	{ "tx_discards" },
419 	{ "tx_errors" },
420 
421 	{ "dma_writeq_full" },
422 	{ "dma_write_prioq_full" },
423 	{ "rxbds_empty" },
424 	{ "rx_discards" },
425 	{ "rx_errors" },
426 	{ "rx_threshold_hit" },
427 
428 	{ "dma_readq_full" },
429 	{ "dma_read_prioq_full" },
430 	{ "tx_comp_queue_full" },
431 
432 	{ "ring_set_send_prod_index" },
433 	{ "ring_status_update" },
434 	{ "nic_irqs" },
435 	{ "nic_avoided_irqs" },
436 	{ "nic_tx_threshold_hit" },
437 
438 	{ "mbuf_lwm_thresh_hit" },
439 };
440 
441 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
442 #define TG3_NVRAM_TEST		0
443 #define TG3_LINK_TEST		1
444 #define TG3_REGISTER_TEST	2
445 #define TG3_MEMORY_TEST		3
446 #define TG3_MAC_LOOPB_TEST	4
447 #define TG3_PHY_LOOPB_TEST	5
448 #define TG3_EXT_LOOPB_TEST	6
449 #define TG3_INTERRUPT_TEST	7
450 
451 
452 static const struct {
453 	const char string[ETH_GSTRING_LEN];
454 } ethtool_test_keys[] = {
455 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
456 	[TG3_LINK_TEST]		= { "link test         (online) " },
457 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
458 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
459 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
460 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
461 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
462 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
463 };
464 
465 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
466 
467 
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
469 {
470 	writel(val, tp->regs + off);
471 }
472 
static u32 tg3_read32(struct tg3 *tp, u32 off)
474 {
475 	return readl(tp->regs + off);
476 }
477 
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
479 {
480 	writel(val, tp->aperegs + off);
481 }
482 
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
484 {
485 	return readl(tp->aperegs + off);
486 }
487 
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
489 {
490 	unsigned long flags;
491 
492 	spin_lock_irqsave(&tp->indirect_lock, flags);
493 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
494 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
495 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
496 }
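/* The indirect path above is a two-cycle PCI config protocol: the target
 * offset goes to TG3PCI_REG_BASE_ADDR, then the payload moves through
 * TG3PCI_REG_DATA.  indirect_lock keeps another context from slipping a
 * different base address in between the two config writes.
 */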
497 
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
499 {
500 	writel(val, tp->regs + off);
501 	readl(tp->regs + off);
502 }
503 
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
505 {
506 	unsigned long flags;
507 	u32 val;
508 
509 	spin_lock_irqsave(&tp->indirect_lock, flags);
510 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
511 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
512 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
513 	return val;
514 }
515 
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
517 {
518 	unsigned long flags;
519 
520 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
521 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
522 				       TG3_64BIT_REG_LOW, val);
523 		return;
524 	}
525 	if (off == TG3_RX_STD_PROD_IDX_REG) {
526 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
527 				       TG3_64BIT_REG_LOW, val);
528 		return;
529 	}
530 
531 	spin_lock_irqsave(&tp->indirect_lock, flags);
532 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
533 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
534 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
535 
536 	/* In indirect mode when disabling interrupts, we also need
537 	 * to clear the interrupt bit in the GRC local ctrl register.
538 	 */
539 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
540 	    (val == 0x1)) {
541 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
542 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
543 	}
544 }
545 
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
547 {
548 	unsigned long flags;
549 	u32 val;
550 
551 	spin_lock_irqsave(&tp->indirect_lock, flags);
552 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
553 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
554 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
555 	return val;
556 }
557 
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example, if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
564 {
565 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
566 		/* Non-posted methods */
567 		tp->write32(tp, off, val);
568 	else {
569 		/* Posted method */
570 		tg3_write32(tp, off, val);
571 		if (usec_wait)
572 			udelay(usec_wait);
573 		tp->read32(tp, off);
574 	}
575 	/* Wait again after the read for the posted method to guarantee that
576 	 * the wait time is met.
577 	 */
578 	if (usec_wait)
579 		udelay(usec_wait);
580 }
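/* Usage sketch: callers that need a settling time use the tw32_wait_f()
 * wrapper defined below, e.g. tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40)
 * guarantees at least 40 usec pass after the write, whether the posted or
 * the non-posted path was taken.
 */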
581 
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
583 {
584 	tp->write32_mbox(tp, off, val);
585 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
586 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
587 	     !tg3_flag(tp, ICH_WORKAROUND)))
588 		tp->read32_mbox(tp, off);
589 }
590 
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
592 {
593 	void __iomem *mbox = tp->regs + off;
594 	writel(val, mbox);
595 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
596 		writel(val, mbox);
597 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
598 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
599 		readl(mbox);
600 }
601 
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
603 {
604 	return readl(tp->regs + off + GRCMBOX_BASE);
605 }
606 
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
608 {
609 	writel(val, tp->regs + off + GRCMBOX_BASE);
610 }
611 
612 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
613 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
614 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
615 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
616 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
617 
618 #define tw32(reg, val)			tp->write32(tp, reg, val)
619 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
620 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
621 #define tr32(reg)			tp->read32(tp, reg)
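/* Naming convention (a reading aid, not from the hardware docs): the _f
 * "flush" variants generally read the register back to push out a posted
 * write, tw32_wait_f() additionally enforces a delay in usec, and plain
 * tw32()/tr32() dispatch through whatever tp->write32/tp->read32
 * implementation was installed for this chip.
 */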
622 
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
624 {
625 	unsigned long flags;
626 
627 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
628 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
629 		return;
630 
631 	spin_lock_irqsave(&tp->indirect_lock, flags);
632 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
633 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
634 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
635 
636 		/* Always leave this as zero. */
637 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
638 	} else {
639 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
640 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
641 
642 		/* Always leave this as zero. */
643 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
644 	}
645 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
646 }
647 
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
649 {
650 	unsigned long flags;
651 
652 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
653 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
654 		*val = 0;
655 		return;
656 	}
657 
658 	spin_lock_irqsave(&tp->indirect_lock, flags);
659 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
660 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
661 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
662 
663 		/* Always leave this as zero. */
664 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
665 	} else {
666 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
667 		*val = tr32(TG3PCI_MEM_WIN_DATA);
668 
669 		/* Always leave this as zero. */
670 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
671 	}
672 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
673 }
674 
static void tg3_ape_lock_init(struct tg3 *tp)
676 {
677 	int i;
678 	u32 regbase, bit;
679 
680 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
681 		regbase = TG3_APE_LOCK_GRANT;
682 	else
683 		regbase = TG3_APE_PER_LOCK_GRANT;
684 
	/* Make sure the driver doesn't hold any stale locks. */
686 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
687 		switch (i) {
688 		case TG3_APE_LOCK_PHY0:
689 		case TG3_APE_LOCK_PHY1:
690 		case TG3_APE_LOCK_PHY2:
691 		case TG3_APE_LOCK_PHY3:
692 			bit = APE_LOCK_GRANT_DRIVER;
693 			break;
694 		default:
695 			if (!tp->pci_fn)
696 				bit = APE_LOCK_GRANT_DRIVER;
697 			else
698 				bit = 1 << tp->pci_fn;
699 		}
700 		tg3_ape_write32(tp, regbase + 4 * i, bit);
701 	}
702 
703 }
704 
static int tg3_ape_lock(struct tg3 *tp, int locknum)
706 {
707 	int i, off;
708 	int ret = 0;
709 	u32 status, req, gnt, bit;
710 
711 	if (!tg3_flag(tp, ENABLE_APE))
712 		return 0;
713 
714 	switch (locknum) {
715 	case TG3_APE_LOCK_GPIO:
716 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
717 			return 0;
718 		fallthrough;
719 	case TG3_APE_LOCK_GRC:
720 	case TG3_APE_LOCK_MEM:
721 		if (!tp->pci_fn)
722 			bit = APE_LOCK_REQ_DRIVER;
723 		else
724 			bit = 1 << tp->pci_fn;
725 		break;
726 	case TG3_APE_LOCK_PHY0:
727 	case TG3_APE_LOCK_PHY1:
728 	case TG3_APE_LOCK_PHY2:
729 	case TG3_APE_LOCK_PHY3:
730 		bit = APE_LOCK_REQ_DRIVER;
731 		break;
732 	default:
733 		return -EINVAL;
734 	}
735 
736 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
737 		req = TG3_APE_LOCK_REQ;
738 		gnt = TG3_APE_LOCK_GRANT;
739 	} else {
740 		req = TG3_APE_PER_LOCK_REQ;
741 		gnt = TG3_APE_PER_LOCK_GRANT;
742 	}
743 
744 	off = 4 * locknum;
745 
746 	tg3_ape_write32(tp, req + off, bit);
747 
748 	/* Wait for up to 1 millisecond to acquire lock. */
749 	for (i = 0; i < 100; i++) {
750 		status = tg3_ape_read32(tp, gnt + off);
751 		if (status == bit)
752 			break;
753 		if (pci_channel_offline(tp->pdev))
754 			break;
755 
756 		udelay(10);
757 	}
758 
759 	if (status != bit) {
760 		/* Revoke the lock request. */
761 		tg3_ape_write32(tp, gnt + off, bit);
762 		ret = -EBUSY;
763 	}
764 
765 	return ret;
766 }
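/* Usage sketch: a successful tg3_ape_lock() must be paired with
 * tg3_ape_unlock() for the same lock, e.g.
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	...access APE shared memory...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * as tg3_ape_event_lock() below does.
 */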
767 
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
769 {
770 	u32 gnt, bit;
771 
772 	if (!tg3_flag(tp, ENABLE_APE))
773 		return;
774 
775 	switch (locknum) {
776 	case TG3_APE_LOCK_GPIO:
777 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
778 			return;
779 		fallthrough;
780 	case TG3_APE_LOCK_GRC:
781 	case TG3_APE_LOCK_MEM:
782 		if (!tp->pci_fn)
783 			bit = APE_LOCK_GRANT_DRIVER;
784 		else
785 			bit = 1 << tp->pci_fn;
786 		break;
787 	case TG3_APE_LOCK_PHY0:
788 	case TG3_APE_LOCK_PHY1:
789 	case TG3_APE_LOCK_PHY2:
790 	case TG3_APE_LOCK_PHY3:
791 		bit = APE_LOCK_GRANT_DRIVER;
792 		break;
793 	default:
794 		return;
795 	}
796 
797 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
798 		gnt = TG3_APE_LOCK_GRANT;
799 	else
800 		gnt = TG3_APE_PER_LOCK_GRANT;
801 
802 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
803 }
804 
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
806 {
807 	u32 apedata;
808 
809 	while (timeout_us) {
810 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
811 			return -EBUSY;
812 
813 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
814 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
815 			break;
816 
817 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
818 
819 		udelay(10);
820 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
821 	}
822 
823 	return timeout_us ? 0 : -EBUSY;
824 }
825 
826 #ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
828 {
829 	u32 i, apedata;
830 
831 	for (i = 0; i < timeout_us / 10; i++) {
832 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
833 
834 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
835 			break;
836 
837 		udelay(10);
838 	}
839 
840 	return i == timeout_us / 10;
841 }
842 
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
845 {
846 	int err;
847 	u32 i, bufoff, msgoff, maxlen, apedata;
848 
849 	if (!tg3_flag(tp, APE_HAS_NCSI))
850 		return 0;
851 
852 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
853 	if (apedata != APE_SEG_SIG_MAGIC)
854 		return -ENODEV;
855 
856 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
857 	if (!(apedata & APE_FW_STATUS_READY))
858 		return -EAGAIN;
859 
860 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
861 		 TG3_APE_SHMEM_BASE;
862 	msgoff = bufoff + 2 * sizeof(u32);
863 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
864 
865 	while (len) {
866 		u32 length;
867 
868 		/* Cap xfer sizes to scratchpad limits. */
869 		length = (len > maxlen) ? maxlen : len;
870 		len -= length;
871 
872 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
873 		if (!(apedata & APE_FW_STATUS_READY))
874 			return -EAGAIN;
875 
876 		/* Wait for up to 1 msec for APE to service previous event. */
877 		err = tg3_ape_event_lock(tp, 1000);
878 		if (err)
879 			return err;
880 
881 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
882 			  APE_EVENT_STATUS_SCRTCHPD_READ |
883 			  APE_EVENT_STATUS_EVENT_PENDING;
884 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
885 
886 		tg3_ape_write32(tp, bufoff, base_off);
887 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
888 
889 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
890 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
891 
892 		base_off += length;
893 
894 		if (tg3_ape_wait_for_event(tp, 30000))
895 			return -EAGAIN;
896 
897 		for (i = 0; length; i += 4, length -= 4) {
898 			u32 val = tg3_ape_read32(tp, msgoff + i);
899 			memcpy(data, &val, sizeof(u32));
900 			data++;
901 		}
902 	}
903 
904 	return 0;
905 }
906 #endif
907 
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
909 {
910 	int err;
911 	u32 apedata;
912 
913 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
914 	if (apedata != APE_SEG_SIG_MAGIC)
915 		return -EAGAIN;
916 
917 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
918 	if (!(apedata & APE_FW_STATUS_READY))
919 		return -EAGAIN;
920 
	/* Wait up to 20 milliseconds for APE to service the previous event. */
922 	err = tg3_ape_event_lock(tp, 20000);
923 	if (err)
924 		return err;
925 
926 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
927 			event | APE_EVENT_STATUS_EVENT_PENDING);
928 
929 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
930 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
931 
932 	return 0;
933 }
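/* Summary of the handshake above: check the APE segment signature and
 * firmware READY status, hold TG3_APE_LOCK_MEM while staging the event with
 * APE_EVENT_STATUS_EVENT_PENDING, then release the lock and ring the
 * APE_EVENT_1 doorbell so the APE firmware picks the event up.
 */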
934 
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
936 {
937 	u32 event;
938 	u32 apedata;
939 
940 	if (!tg3_flag(tp, ENABLE_APE))
941 		return;
942 
943 	switch (kind) {
944 	case RESET_KIND_INIT:
945 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
946 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
947 				APE_HOST_SEG_SIG_MAGIC);
948 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
949 				APE_HOST_SEG_LEN_MAGIC);
950 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
951 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
952 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
953 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
954 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
955 				APE_HOST_BEHAV_NO_PHYLOCK);
956 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
957 				    TG3_APE_HOST_DRVR_STATE_START);
958 
959 		event = APE_EVENT_STATUS_STATE_START;
960 		break;
961 	case RESET_KIND_SHUTDOWN:
962 		if (device_may_wakeup(&tp->pdev->dev) &&
963 		    tg3_flag(tp, WOL_ENABLE)) {
964 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
965 					    TG3_APE_HOST_WOL_SPEED_AUTO);
966 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
967 		} else
968 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
969 
970 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
971 
972 		event = APE_EVENT_STATUS_STATE_UNLOAD;
973 		break;
974 	default:
975 		return;
976 	}
977 
978 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
979 
980 	tg3_ape_send_event(tp, event);
981 }
982 
static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
985 {
	/* Skip unless the heartbeat interval has elapsed */
987 	if (!tg3_flag(tp, ENABLE_APE) ||
988 	    time_before(jiffies, tp->ape_hb_jiffies + interval))
989 		return;
990 
991 	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
992 	tp->ape_hb_jiffies = jiffies;
993 }
994 
static void tg3_disable_ints(struct tg3 *tp)
996 {
997 	int i;
998 
999 	tw32(TG3PCI_MISC_HOST_CTRL,
1000 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1001 	for (i = 0; i < tp->irq_max; i++)
1002 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1003 }
1004 
static void tg3_enable_ints(struct tg3 *tp)
1006 {
1007 	int i;
1008 
1009 	tp->irq_sync = 0;
1010 	wmb();
1011 
1012 	tw32(TG3PCI_MISC_HOST_CTRL,
1013 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1014 
1015 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1016 	for (i = 0; i < tp->irq_cnt; i++) {
1017 		struct tg3_napi *tnapi = &tp->napi[i];
1018 
1019 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1020 		if (tg3_flag(tp, 1SHOT_MSI))
1021 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1022 
1023 		tp->coal_now |= tnapi->coal_now;
1024 	}
1025 
1026 	/* Force an initial interrupt */
1027 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1028 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1029 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1030 	else
1031 		tw32(HOSTCC_MODE, tp->coal_now);
1032 
1033 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1034 }
1035 
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1037 {
1038 	struct tg3 *tp = tnapi->tp;
1039 	struct tg3_hw_status *sblk = tnapi->hw_status;
1040 	unsigned int work_exists = 0;
1041 
1042 	/* check for phy events */
1043 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1044 		if (sblk->status & SD_STATUS_LINK_CHG)
1045 			work_exists = 1;
1046 	}
1047 
1048 	/* check for TX work to do */
1049 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1050 		work_exists = 1;
1051 
1052 	/* check for RX work to do */
1053 	if (tnapi->rx_rcb_prod_idx &&
1054 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1055 		work_exists = 1;
1056 
1057 	return work_exists;
1058 }
1059 
1060 /* tg3_int_reenable
1061  *  similar to tg3_enable_ints, but it accurately determines whether there
1062  *  is new work pending and can return without flushing the PIO write
1063  *  which reenables interrupts
1064  */
static void tg3_int_reenable(struct tg3_napi *tnapi)
1066 {
1067 	struct tg3 *tp = tnapi->tp;
1068 
1069 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1070 
1071 	/* When doing tagged status, this work check is unnecessary.
1072 	 * The last_tag we write above tells the chip which piece of
1073 	 * work we've completed.
1074 	 */
1075 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1076 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1077 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1078 }
1079 
static void tg3_switch_clocks(struct tg3 *tp)
1081 {
1082 	u32 clock_ctrl;
1083 	u32 orig_clock_ctrl;
1084 
1085 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1086 		return;
1087 
1088 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1089 
1090 	orig_clock_ctrl = clock_ctrl;
1091 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1092 		       CLOCK_CTRL_CLKRUN_OENABLE |
1093 		       0x1f);
1094 	tp->pci_clock_ctrl = clock_ctrl;
1095 
1096 	if (tg3_flag(tp, 5705_PLUS)) {
1097 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1098 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1099 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1100 		}
1101 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1102 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1103 			    clock_ctrl |
1104 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1105 			    40);
1106 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1107 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1108 			    40);
1109 	}
1110 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1111 }
1112 
1113 #define PHY_BUSY_LOOPS	5000
1114 
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
1117 {
1118 	u32 frame_val;
1119 	unsigned int loops;
1120 	int ret;
1121 
1122 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1123 		tw32_f(MAC_MI_MODE,
1124 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1125 		udelay(80);
1126 	}
1127 
1128 	tg3_ape_lock(tp, tp->phy_ape_lock);
1129 
1130 	*val = 0x0;
1131 
1132 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1133 		      MI_COM_PHY_ADDR_MASK);
1134 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1135 		      MI_COM_REG_ADDR_MASK);
1136 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1137 
1138 	tw32_f(MAC_MI_COM, frame_val);
1139 
1140 	loops = PHY_BUSY_LOOPS;
1141 	while (loops != 0) {
1142 		udelay(10);
1143 		frame_val = tr32(MAC_MI_COM);
1144 
1145 		if ((frame_val & MI_COM_BUSY) == 0) {
1146 			udelay(5);
1147 			frame_val = tr32(MAC_MI_COM);
1148 			break;
1149 		}
1150 		loops -= 1;
1151 	}
1152 
1153 	ret = -EBUSY;
1154 	if (loops != 0) {
1155 		*val = frame_val & MI_COM_DATA_MASK;
1156 		ret = 0;
1157 	}
1158 
1159 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1160 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1161 		udelay(80);
1162 	}
1163 
1164 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1165 
1166 	return ret;
1167 }
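/* Timing note: the busy poll above runs at most PHY_BUSY_LOOPS (5000)
 * iterations of udelay(10), so an unresponsive PHY costs roughly 50 msec
 * before -EBUSY is returned.
 */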
1168 
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1170 {
1171 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1172 }
1173 
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
1176 {
1177 	u32 frame_val;
1178 	unsigned int loops;
1179 	int ret;
1180 
1181 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1182 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1183 		return 0;
1184 
1185 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1186 		tw32_f(MAC_MI_MODE,
1187 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1188 		udelay(80);
1189 	}
1190 
1191 	tg3_ape_lock(tp, tp->phy_ape_lock);
1192 
1193 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1194 		      MI_COM_PHY_ADDR_MASK);
1195 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1196 		      MI_COM_REG_ADDR_MASK);
1197 	frame_val |= (val & MI_COM_DATA_MASK);
1198 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1199 
1200 	tw32_f(MAC_MI_COM, frame_val);
1201 
1202 	loops = PHY_BUSY_LOOPS;
1203 	while (loops != 0) {
1204 		udelay(10);
1205 		frame_val = tr32(MAC_MI_COM);
1206 		if ((frame_val & MI_COM_BUSY) == 0) {
1207 			udelay(5);
1208 			frame_val = tr32(MAC_MI_COM);
1209 			break;
1210 		}
1211 		loops -= 1;
1212 	}
1213 
1214 	ret = -EBUSY;
1215 	if (loops != 0)
1216 		ret = 0;
1217 
1218 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1219 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1220 		udelay(80);
1221 	}
1222 
1223 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1224 
1225 	return ret;
1226 }
1227 
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1229 {
1230 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1231 }
1232 
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1234 {
1235 	int err;
1236 
1237 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1238 	if (err)
1239 		goto done;
1240 
1241 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1242 	if (err)
1243 		goto done;
1244 
1245 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1246 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1247 	if (err)
1248 		goto done;
1249 
1250 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1251 
1252 done:
1253 	return err;
1254 }
1255 
static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1257 {
1258 	int err;
1259 
1260 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1261 	if (err)
1262 		goto done;
1263 
1264 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1265 	if (err)
1266 		goto done;
1267 
1268 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1269 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1270 	if (err)
1271 		goto done;
1272 
1273 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1274 
1275 done:
1276 	return err;
1277 }
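/* Both clause 45 helpers use the same indirect MMD sequence: select the
 * device in MII_TG3_MMD_CTRL, latch the register address through
 * MII_TG3_MMD_ADDRESS, then switch to MII_TG3_MMD_CTRL_DATA_NOINC mode so
 * the ADDRESS register carries the data for the actual read or write.
 */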
1278 
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1280 {
1281 	int err;
1282 
1283 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1284 	if (!err)
1285 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1286 
1287 	return err;
1288 }
1289 
static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1291 {
1292 	int err;
1293 
1294 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1295 	if (!err)
1296 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1297 
1298 	return err;
1299 }
1300 
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1302 {
1303 	int err;
1304 
1305 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1306 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1307 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1308 	if (!err)
1309 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1310 
1311 	return err;
1312 }
1313 
static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1315 {
1316 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1317 		set |= MII_TG3_AUXCTL_MISC_WREN;
1318 
1319 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1320 }
1321 
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1323 {
1324 	u32 val;
1325 	int err;
1326 
1327 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1328 
1329 	if (err)
1330 		return err;
1331 
1332 	if (enable)
1333 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1334 	else
1335 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1336 
1337 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1338 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1339 
1340 	return err;
1341 }
1342 
static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1344 {
1345 	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1346 			    reg | val | MII_TG3_MISC_SHDW_WREN);
1347 }
1348 
static int tg3_bmcr_reset(struct tg3 *tp)
1350 {
1351 	u32 phy_control;
1352 	int limit, err;
1353 
1354 	/* OK, reset it, and poll the BMCR_RESET bit until it
1355 	 * clears or we time out.
1356 	 */
1357 	phy_control = BMCR_RESET;
1358 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1359 	if (err != 0)
1360 		return -EBUSY;
1361 
1362 	limit = 5000;
1363 	while (limit--) {
1364 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1365 		if (err != 0)
1366 			return -EBUSY;
1367 
1368 		if ((phy_control & BMCR_RESET) == 0) {
1369 			udelay(40);
1370 			break;
1371 		}
1372 		udelay(10);
1373 	}
1374 	if (limit < 0)
1375 		return -EBUSY;
1376 
1377 	return 0;
1378 }
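/* Poll budget: 5000 iterations of udelay(10) give the PHY roughly 50 msec
 * to clear BMCR_RESET before the reset is reported as failed.
 */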
1379 
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1381 {
1382 	struct tg3 *tp = bp->priv;
1383 	u32 val;
1384 
1385 	spin_lock_bh(&tp->lock);
1386 
1387 	if (__tg3_readphy(tp, mii_id, reg, &val))
1388 		val = -EIO;
1389 
1390 	spin_unlock_bh(&tp->lock);
1391 
1392 	return val;
1393 }
1394 
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1396 {
1397 	struct tg3 *tp = bp->priv;
1398 	u32 ret = 0;
1399 
1400 	spin_lock_bh(&tp->lock);
1401 
1402 	if (__tg3_writephy(tp, mii_id, reg, val))
1403 		ret = -EIO;
1404 
1405 	spin_unlock_bh(&tp->lock);
1406 
1407 	return ret;
1408 }
1409 
static void tg3_mdio_config_5785(struct tg3 *tp)
1411 {
1412 	u32 val;
1413 	struct phy_device *phydev;
1414 
1415 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1416 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1417 	case PHY_ID_BCM50610:
1418 	case PHY_ID_BCM50610M:
1419 		val = MAC_PHYCFG2_50610_LED_MODES;
1420 		break;
1421 	case PHY_ID_BCMAC131:
1422 		val = MAC_PHYCFG2_AC131_LED_MODES;
1423 		break;
1424 	case PHY_ID_RTL8211C:
1425 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1426 		break;
1427 	case PHY_ID_RTL8201E:
1428 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1429 		break;
1430 	default:
1431 		return;
1432 	}
1433 
1434 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1435 		tw32(MAC_PHYCFG2, val);
1436 
1437 		val = tr32(MAC_PHYCFG1);
1438 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1439 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1440 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1441 		tw32(MAC_PHYCFG1, val);
1442 
1443 		return;
1444 	}
1445 
1446 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1447 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1448 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1449 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1450 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1451 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1452 		       MAC_PHYCFG2_INBAND_ENABLE;
1453 
1454 	tw32(MAC_PHYCFG2, val);
1455 
1456 	val = tr32(MAC_PHYCFG1);
1457 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1458 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1459 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1460 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1461 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1462 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1463 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1464 	}
1465 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1466 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1467 	tw32(MAC_PHYCFG1, val);
1468 
1469 	val = tr32(MAC_EXT_RGMII_MODE);
1470 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1471 		 MAC_RGMII_MODE_RX_QUALITY |
1472 		 MAC_RGMII_MODE_RX_ACTIVITY |
1473 		 MAC_RGMII_MODE_RX_ENG_DET |
1474 		 MAC_RGMII_MODE_TX_ENABLE |
1475 		 MAC_RGMII_MODE_TX_LOWPWR |
1476 		 MAC_RGMII_MODE_TX_RESET);
1477 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1478 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1479 			val |= MAC_RGMII_MODE_RX_INT_B |
1480 			       MAC_RGMII_MODE_RX_QUALITY |
1481 			       MAC_RGMII_MODE_RX_ACTIVITY |
1482 			       MAC_RGMII_MODE_RX_ENG_DET;
1483 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1484 			val |= MAC_RGMII_MODE_TX_ENABLE |
1485 			       MAC_RGMII_MODE_TX_LOWPWR |
1486 			       MAC_RGMII_MODE_TX_RESET;
1487 	}
1488 	tw32(MAC_EXT_RGMII_MODE, val);
1489 }
1490 
static void tg3_mdio_start(struct tg3 *tp)
1492 {
1493 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1494 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1495 	udelay(80);
1496 
1497 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1498 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1499 		tg3_mdio_config_5785(tp);
1500 }
1501 
static int tg3_mdio_init(struct tg3 *tp)
1503 {
1504 	int i;
1505 	u32 reg;
1506 	struct phy_device *phydev;
1507 
1508 	if (tg3_flag(tp, 5717_PLUS)) {
1509 		u32 is_serdes;
1510 
1511 		tp->phy_addr = tp->pci_fn + 1;
1512 
1513 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1514 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1515 		else
1516 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1517 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1518 		if (is_serdes)
1519 			tp->phy_addr += 7;
1520 	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1521 		int addr;
1522 
1523 		addr = ssb_gige_get_phyaddr(tp->pdev);
1524 		if (addr < 0)
1525 			return addr;
1526 		tp->phy_addr = addr;
1527 	} else
1528 		tp->phy_addr = TG3_PHY_MII_ADDR;
1529 
1530 	tg3_mdio_start(tp);
1531 
1532 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1533 		return 0;
1534 
1535 	tp->mdio_bus = mdiobus_alloc();
1536 	if (tp->mdio_bus == NULL)
1537 		return -ENOMEM;
1538 
1539 	tp->mdio_bus->name     = "tg3 mdio bus";
1540 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1541 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1542 	tp->mdio_bus->priv     = tp;
1543 	tp->mdio_bus->parent   = &tp->pdev->dev;
1544 	tp->mdio_bus->read     = &tg3_mdio_read;
1545 	tp->mdio_bus->write    = &tg3_mdio_write;
1546 	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1547 
1548 	/* The bus registration will look for all the PHYs on the mdio bus.
1549 	 * Unfortunately, it does not ensure the PHY is powered up before
1550 	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
1552 	 */
1553 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1554 		tg3_bmcr_reset(tp);
1555 
1556 	i = mdiobus_register(tp->mdio_bus);
1557 	if (i) {
1558 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1559 		mdiobus_free(tp->mdio_bus);
1560 		return i;
1561 	}
1562 
1563 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1564 
1565 	if (!phydev || !phydev->drv) {
1566 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1567 		mdiobus_unregister(tp->mdio_bus);
1568 		mdiobus_free(tp->mdio_bus);
1569 		return -ENODEV;
1570 	}
1571 
1572 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1573 	case PHY_ID_BCM57780:
1574 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1575 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1576 		break;
1577 	case PHY_ID_BCM50610:
1578 	case PHY_ID_BCM50610M:
1579 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1580 				     PHY_BRCM_RX_REFCLK_UNUSED |
1581 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1582 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583 		fallthrough;
1584 	case PHY_ID_RTL8211C:
1585 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1586 		break;
1587 	case PHY_ID_RTL8201E:
1588 	case PHY_ID_BCMAC131:
1589 		phydev->interface = PHY_INTERFACE_MODE_MII;
1590 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1591 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1592 		break;
1593 	}
1594 
1595 	tg3_flag_set(tp, MDIOBUS_INITED);
1596 
1597 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1598 		tg3_mdio_config_5785(tp);
1599 
1600 	return 0;
1601 }
1602 
static void tg3_mdio_fini(struct tg3 *tp)
1604 {
1605 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1606 		tg3_flag_clear(tp, MDIOBUS_INITED);
1607 		mdiobus_unregister(tp->mdio_bus);
1608 		mdiobus_free(tp->mdio_bus);
1609 	}
1610 }
1611 
1612 /* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
1614 {
1615 	u32 val;
1616 
1617 	val = tr32(GRC_RX_CPU_EVENT);
1618 	val |= GRC_RX_CPU_DRIVER_EVENT;
1619 	tw32_f(GRC_RX_CPU_EVENT, val);
1620 
1621 	tp->last_event_jiffies = jiffies;
1622 }
1623 
1624 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1625 
1626 /* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
1628 {
1629 	int i;
1630 	unsigned int delay_cnt;
1631 	long time_remain;
1632 
1633 	/* If enough time has passed, no wait is necessary. */
1634 	time_remain = (long)(tp->last_event_jiffies + 1 +
1635 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1636 		      (long)jiffies;
1637 	if (time_remain < 0)
1638 		return;
1639 
1640 	/* Check if we can shorten the wait time. */
1641 	delay_cnt = jiffies_to_usecs(time_remain);
1642 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1643 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1644 	delay_cnt = (delay_cnt >> 3) + 1;
1645 
1646 	for (i = 0; i < delay_cnt; i++) {
1647 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1648 			break;
1649 		if (pci_channel_offline(tp->pdev))
1650 			break;
1651 
1652 		udelay(8);
1653 	}
1654 }
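/* Worked example of the shortened wait: with TG3_FW_EVENT_TIMEOUT_USEC ==
 * 2500 and no time yet elapsed, delay_cnt becomes (2500 >> 3) + 1 = 313
 * polls of udelay(8), i.e. just over the 2.5 msec budget.
 */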
1655 
1656 /* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1658 {
1659 	u32 reg, val;
1660 
1661 	val = 0;
1662 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1663 		val = reg << 16;
1664 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1665 		val |= (reg & 0xffff);
1666 	*data++ = val;
1667 
1668 	val = 0;
1669 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1670 		val = reg << 16;
1671 	if (!tg3_readphy(tp, MII_LPA, &reg))
1672 		val |= (reg & 0xffff);
1673 	*data++ = val;
1674 
1675 	val = 0;
1676 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1677 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1678 			val = reg << 16;
1679 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1680 			val |= (reg & 0xffff);
1681 	}
1682 	*data++ = val;
1683 
1684 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1685 		val = reg << 16;
1686 	else
1687 		val = 0;
1688 	*data++ = val;
1689 }
1690 
1691 /* tp->lock is held. */
1692 static void tg3_ump_link_report(struct tg3 *tp)
1693 {
1694 	u32 data[4];
1695 
1696 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1697 		return;
1698 
1699 	tg3_phy_gather_ump_data(tp, data);
1700 
1701 	tg3_wait_for_event_ack(tp);
1702 
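	/* Post the link-update command, length and data words to the
	 * firmware mailbox, then raise the driver event.
	 */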
1703 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1704 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1705 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1706 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1707 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1708 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1709 
1710 	tg3_generate_fw_event(tp);
1711 }
1712 
1713 /* tp->lock is held. */
1714 static void tg3_stop_fw(struct tg3 *tp)
1715 {
1716 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1717 		/* Wait for RX cpu to ACK the previous event. */
1718 		tg3_wait_for_event_ack(tp);
1719 
1720 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1721 
1722 		tg3_generate_fw_event(tp);
1723 
1724 		/* Wait for RX cpu to ACK this event. */
1725 		tg3_wait_for_event_ack(tp);
1726 	}
1727 }
1728 
1729 /* tp->lock is held. */
1730 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1731 {
1732 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1733 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1734 
1735 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1736 		switch (kind) {
1737 		case RESET_KIND_INIT:
1738 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1739 				      DRV_STATE_START);
1740 			break;
1741 
1742 		case RESET_KIND_SHUTDOWN:
1743 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1744 				      DRV_STATE_UNLOAD);
1745 			break;
1746 
1747 		case RESET_KIND_SUSPEND:
1748 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1749 				      DRV_STATE_SUSPEND);
1750 			break;
1751 
1752 		default:
1753 			break;
1754 		}
1755 	}
1756 }
1757 
1758 /* tp->lock is held. */
1759 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1760 {
1761 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1762 		switch (kind) {
1763 		case RESET_KIND_INIT:
1764 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1765 				      DRV_STATE_START_DONE);
1766 			break;
1767 
1768 		case RESET_KIND_SHUTDOWN:
1769 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1770 				      DRV_STATE_UNLOAD_DONE);
1771 			break;
1772 
1773 		default:
1774 			break;
1775 		}
1776 	}
1777 }
1778 
1779 /* tp->lock is held. */
1780 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1781 {
1782 	if (tg3_flag(tp, ENABLE_ASF)) {
1783 		switch (kind) {
1784 		case RESET_KIND_INIT:
1785 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1786 				      DRV_STATE_START);
1787 			break;
1788 
1789 		case RESET_KIND_SHUTDOWN:
1790 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1791 				      DRV_STATE_UNLOAD);
1792 			break;
1793 
1794 		case RESET_KIND_SUSPEND:
1795 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1796 				      DRV_STATE_SUSPEND);
1797 			break;
1798 
1799 		default:
1800 			break;
1801 		}
1802 	}
1803 }
1804 
1805 static int tg3_poll_fw(struct tg3 *tp)
1806 {
1807 	int i;
1808 	u32 val;
1809 
1810 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1811 		return 0;
1812 
1813 	if (tg3_flag(tp, IS_SSB_CORE)) {
1814 		/* We don't use firmware. */
1815 		return 0;
1816 	}
1817 
1818 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1819 		/* Wait up to 20ms for init done. */
1820 		for (i = 0; i < 200; i++) {
1821 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1822 				return 0;
1823 			if (pci_channel_offline(tp->pdev))
1824 				return -ENODEV;
1825 
1826 			udelay(100);
1827 		}
1828 		return -ENODEV;
1829 	}
1830 
1831 	/* Wait for firmware initialization to complete. */
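	/* The bootcode signals completion by writing back the one's
	 * complement of the magic value it finds in the mailbox.
	 */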
1832 	for (i = 0; i < 100000; i++) {
1833 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1834 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1835 			break;
1836 		if (pci_channel_offline(tp->pdev)) {
1837 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1838 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1839 				netdev_info(tp->dev, "No firmware running\n");
1840 			}
1841 
1842 			break;
1843 		}
1844 
1845 		udelay(10);
1846 	}
1847 
1848 	/* Chip might not be fitted with firmware.  Some Sun onboard
1849 	 * parts are configured like that.  So don't signal the timeout
1850 	 * of the above loop as an error, but do report the lack of
1851 	 * running firmware once.
1852 	 */
1853 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1854 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1855 
1856 		netdev_info(tp->dev, "No firmware running\n");
1857 	}
1858 
1859 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1860 		/* The 57765 A0 needs a little more
1861 		 * time to do some important work.
1862 		 */
1863 		mdelay(10);
1864 	}
1865 
1866 	return 0;
1867 }
1868 
1869 static void tg3_link_report(struct tg3 *tp)
1870 {
1871 	if (!netif_carrier_ok(tp->dev)) {
1872 		netif_info(tp, link, tp->dev, "Link is down\n");
1873 		tg3_ump_link_report(tp);
1874 	} else if (netif_msg_link(tp)) {
1875 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1876 			    (tp->link_config.active_speed == SPEED_1000 ?
1877 			     1000 :
1878 			     (tp->link_config.active_speed == SPEED_100 ?
1879 			      100 : 10)),
1880 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1881 			     "full" : "half"));
1882 
1883 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1884 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1885 			    "on" : "off",
1886 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1887 			    "on" : "off");
1888 
1889 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1890 			netdev_info(tp->dev, "EEE is %s\n",
1891 				    tp->setlpicnt ? "enabled" : "disabled");
1892 
1893 		tg3_ump_link_report(tp);
1894 	}
1895 
1896 	tp->link_up = netif_carrier_ok(tp->dev);
1897 }
1898 
1899 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1900 {
1901 	u32 flowctrl = 0;
1902 
1903 	if (adv & ADVERTISE_PAUSE_CAP) {
1904 		flowctrl |= FLOW_CTRL_RX;
1905 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1906 			flowctrl |= FLOW_CTRL_TX;
1907 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1908 		flowctrl |= FLOW_CTRL_TX;
1909 
1910 	return flowctrl;
1911 }
1912 
1913 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1914 {
1915 	u16 miireg;
1916 
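	/* Map the desired pause modes onto 1000BASE-X advertisement bits:
	 * symmetric pause uses PAUSE alone, tx-only uses ASM_DIR alone,
	 * and rx-only requires both PAUSE and ASM_DIR.
	 */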
1917 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1918 		miireg = ADVERTISE_1000XPAUSE;
1919 	else if (flow_ctrl & FLOW_CTRL_TX)
1920 		miireg = ADVERTISE_1000XPSE_ASYM;
1921 	else if (flow_ctrl & FLOW_CTRL_RX)
1922 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1923 	else
1924 		miireg = 0;
1925 
1926 	return miireg;
1927 }
1928 
1929 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1930 {
1931 	u32 flowctrl = 0;
1932 
1933 	if (adv & ADVERTISE_1000XPAUSE) {
1934 		flowctrl |= FLOW_CTRL_RX;
1935 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1936 			flowctrl |= FLOW_CTRL_TX;
1937 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1938 		flowctrl |= FLOW_CTRL_TX;
1939 
1940 	return flowctrl;
1941 }
1942 
1943 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1944 {
1945 	u8 cap = 0;
1946 
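	/* Symmetric PAUSE advertised by both ends yields full flow
	 * control; otherwise an asymmetric match enables exactly one
	 * direction, chosen by which end advertised PAUSE.
	 */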
1947 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1948 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1949 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1950 		if (lcladv & ADVERTISE_1000XPAUSE)
1951 			cap = FLOW_CTRL_RX;
1952 		if (rmtadv & ADVERTISE_1000XPAUSE)
1953 			cap = FLOW_CTRL_TX;
1954 	}
1955 
1956 	return cap;
1957 }
1958 
1959 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1960 {
1961 	u8 autoneg;
1962 	u8 flowctrl = 0;
1963 	u32 old_rx_mode = tp->rx_mode;
1964 	u32 old_tx_mode = tp->tx_mode;
1965 
1966 	if (tg3_flag(tp, USE_PHYLIB))
1967 		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1968 	else
1969 		autoneg = tp->link_config.autoneg;
1970 
1971 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1972 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1973 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1974 		else
1975 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1976 	} else
1977 		flowctrl = tp->link_config.flowctrl;
1978 
1979 	tp->link_config.active_flowctrl = flowctrl;
1980 
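	/* Apply the resolved settings to the MAC, writing the RX/TX mode
	 * registers only when a bit actually changes.
	 */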
1981 	if (flowctrl & FLOW_CTRL_RX)
1982 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1983 	else
1984 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1985 
1986 	if (old_rx_mode != tp->rx_mode)
1987 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1988 
1989 	if (flowctrl & FLOW_CTRL_TX)
1990 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1991 	else
1992 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1993 
1994 	if (old_tx_mode != tp->tx_mode)
1995 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1996 }
1997 
1998 static void tg3_adjust_link(struct net_device *dev)
1999 {
2000 	u8 oldflowctrl, linkmesg = 0;
2001 	u32 mac_mode, lcl_adv, rmt_adv;
2002 	struct tg3 *tp = netdev_priv(dev);
2003 	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2004 
2005 	spin_lock_bh(&tp->lock);
2006 
2007 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2008 				    MAC_MODE_HALF_DUPLEX);
2009 
2010 	oldflowctrl = tp->link_config.active_flowctrl;
2011 
2012 	if (phydev->link) {
2013 		lcl_adv = 0;
2014 		rmt_adv = 0;
2015 
2016 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2017 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2018 		else if (phydev->speed == SPEED_1000 ||
2019 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2020 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2021 		else
2022 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2023 
2024 		if (phydev->duplex == DUPLEX_HALF)
2025 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2026 		else {
2027 			lcl_adv = mii_advertise_flowctrl(
2028 				  tp->link_config.flowctrl);
2029 
2030 			if (phydev->pause)
2031 				rmt_adv = LPA_PAUSE_CAP;
2032 			if (phydev->asym_pause)
2033 				rmt_adv |= LPA_PAUSE_ASYM;
2034 		}
2035 
2036 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2037 	} else
2038 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2039 
2040 	if (mac_mode != tp->mac_mode) {
2041 		tp->mac_mode = mac_mode;
2042 		tw32_f(MAC_MODE, tp->mac_mode);
2043 		udelay(40);
2044 	}
2045 
2046 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2047 		if (phydev->speed == SPEED_10)
2048 			tw32(MAC_MI_STAT,
2049 			     MAC_MI_STAT_10MBPS_MODE |
2050 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2051 		else
2052 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2053 	}
2054 
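	/* Half-duplex gigabit links get a larger slot time (0xff instead
	 * of the default 32) in the MAC transmit lengths register.
	 */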
2055 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2056 		tw32(MAC_TX_LENGTHS,
2057 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2058 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2059 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2060 	else
2061 		tw32(MAC_TX_LENGTHS,
2062 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2063 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2064 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2065 
2066 	if (phydev->link != tp->old_link ||
2067 	    phydev->speed != tp->link_config.active_speed ||
2068 	    phydev->duplex != tp->link_config.active_duplex ||
2069 	    oldflowctrl != tp->link_config.active_flowctrl)
2070 		linkmesg = 1;
2071 
2072 	tp->old_link = phydev->link;
2073 	tp->link_config.active_speed = phydev->speed;
2074 	tp->link_config.active_duplex = phydev->duplex;
2075 
2076 	spin_unlock_bh(&tp->lock);
2077 
2078 	if (linkmesg)
2079 		tg3_link_report(tp);
2080 }
2081 
2082 static int tg3_phy_init(struct tg3 *tp)
2083 {
2084 	struct phy_device *phydev;
2085 
2086 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2087 		return 0;
2088 
2089 	/* Bring the PHY back to a known state. */
2090 	tg3_bmcr_reset(tp);
2091 
2092 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2093 
2094 	/* Attach the MAC to the PHY. */
2095 	phydev = phy_connect(tp->dev, phydev_name(phydev),
2096 			     tg3_adjust_link, phydev->interface);
2097 	if (IS_ERR(phydev)) {
2098 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2099 		return PTR_ERR(phydev);
2100 	}
2101 
2102 	/* Mask with MAC supported features. */
2103 	switch (phydev->interface) {
2104 	case PHY_INTERFACE_MODE_GMII:
2105 	case PHY_INTERFACE_MODE_RGMII:
2106 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2107 			phy_set_max_speed(phydev, SPEED_1000);
2108 			phy_support_asym_pause(phydev);
2109 			break;
2110 		}
2111 		fallthrough;
2112 	case PHY_INTERFACE_MODE_MII:
2113 		phy_set_max_speed(phydev, SPEED_100);
2114 		phy_support_asym_pause(phydev);
2115 		break;
2116 	default:
2117 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2118 		return -EINVAL;
2119 	}
2120 
2121 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2122 
2123 	phy_attached_info(phydev);
2124 
2125 	return 0;
2126 }
2127 
2128 static void tg3_phy_start(struct tg3 *tp)
2129 {
2130 	struct phy_device *phydev;
2131 
2132 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2133 		return;
2134 
2135 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2136 
2137 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2138 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2139 		phydev->speed = tp->link_config.speed;
2140 		phydev->duplex = tp->link_config.duplex;
2141 		phydev->autoneg = tp->link_config.autoneg;
2142 		ethtool_convert_legacy_u32_to_link_mode(
2143 			phydev->advertising, tp->link_config.advertising);
2144 	}
2145 
2146 	phy_start(phydev);
2147 
2148 	phy_start_aneg(phydev);
2149 }
2150 
2151 static void tg3_phy_stop(struct tg3 *tp)
2152 {
2153 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2154 		return;
2155 
2156 	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2157 }
2158 
2159 static void tg3_phy_fini(struct tg3 *tp)
2160 {
2161 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2162 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2163 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2164 	}
2165 }
2166 
2167 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2168 {
2169 	int err;
2170 	u32 val;
2171 
2172 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2173 		return 0;
2174 
2175 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2176 		/* Cannot do read-modify-write on 5401 */
2177 		err = tg3_phy_auxctl_write(tp,
2178 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2179 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2180 					   0x4c20);
2181 		goto done;
2182 	}
2183 
2184 	err = tg3_phy_auxctl_read(tp,
2185 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2186 	if (err)
2187 		return err;
2188 
2189 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2190 	err = tg3_phy_auxctl_write(tp,
2191 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2192 
2193 done:
2194 	return err;
2195 }
2196 
2197 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2198 {
2199 	u32 phytest;
2200 
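	/* FET PHYs keep the APD control in a shadow register bank: enable
	 * shadow access through the TEST register, update the APD bit in
	 * AUXSTAT2, then restore the original TEST value.
	 */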
2201 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2202 		u32 phy;
2203 
2204 		tg3_writephy(tp, MII_TG3_FET_TEST,
2205 			     phytest | MII_TG3_FET_SHADOW_EN);
2206 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2207 			if (enable)
2208 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2209 			else
2210 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2212 		}
2213 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2214 	}
2215 }
2216 
2217 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2218 {
2219 	u32 reg;
2220 
2221 	if (!tg3_flag(tp, 5705_PLUS) ||
2222 	    (tg3_flag(tp, 5717_PLUS) &&
2223 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2224 		return;
2225 
2226 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2227 		tg3_phy_fet_toggle_apd(tp, enable);
2228 		return;
2229 	}
2230 
2231 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2232 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2233 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2234 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2235 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2236 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2237 
2238 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2239 
2240 
2241 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2242 	if (enable)
2243 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2244 
2245 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2246 }
2247 
2248 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2249 {
2250 	u32 phy;
2251 
2252 	if (!tg3_flag(tp, 5705_PLUS) ||
2253 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2254 		return;
2255 
2256 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2257 		u32 ephy;
2258 
2259 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2260 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2261 
2262 			tg3_writephy(tp, MII_TG3_FET_TEST,
2263 				     ephy | MII_TG3_FET_SHADOW_EN);
2264 			if (!tg3_readphy(tp, reg, &phy)) {
2265 				if (enable)
2266 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2267 				else
2268 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2269 				tg3_writephy(tp, reg, phy);
2270 			}
2271 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2272 		}
2273 	} else {
2274 		int ret;
2275 
2276 		ret = tg3_phy_auxctl_read(tp,
2277 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2278 		if (!ret) {
2279 			if (enable)
2280 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2281 			else
2282 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2283 			tg3_phy_auxctl_write(tp,
2284 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2285 		}
2286 	}
2287 }
2288 
2289 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2290 {
2291 	int ret;
2292 	u32 val;
2293 
2294 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2295 		return;
2296 
2297 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2298 	if (!ret)
2299 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2300 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2301 }
2302 
2303 static void tg3_phy_apply_otp(struct tg3 *tp)
2304 {
2305 	u32 otp, phy;
2306 
2307 	if (!tp->phy_otp)
2308 		return;
2309 
2310 	otp = tp->phy_otp;
2311 
2312 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2313 		return;
2314 
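	/* Unpack the factory-programmed OTP fields and write each one into
	 * its corresponding PHY DSP coefficient register.
	 */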
2315 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2316 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2317 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2318 
2319 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2320 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2321 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2322 
2323 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2324 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2325 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2326 
2327 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2328 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2329 
2330 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2331 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2332 
2333 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2334 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2335 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2336 
2337 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2338 }
2339 
2340 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2341 {
2342 	u32 val;
2343 	struct ethtool_eee *dest = &tp->eee;
2344 
2345 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2346 		return;
2347 
2348 	if (eee)
2349 		dest = eee;
2350 
2351 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2352 		return;
2353 
2354 	/* Pull eee_active */
2355 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2356 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2357 		dest->eee_active = 1;
2358 	} else
2359 		dest->eee_active = 0;
2360 
2361 	/* Pull lp advertised settings */
2362 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2363 		return;
2364 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2365 
2366 	/* Pull advertised and eee_enabled settings */
2367 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2368 		return;
2369 	dest->eee_enabled = !!val;
2370 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2371 
2372 	/* Pull tx_lpi_enabled */
2373 	val = tr32(TG3_CPMU_EEE_MODE);
2374 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2375 
2376 	/* Pull lpi timer value */
2377 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2378 }
2379 
2380 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2381 {
2382 	u32 val;
2383 
2384 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2385 		return;
2386 
2387 	tp->setlpicnt = 0;
2388 
2389 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2390 	    current_link_up &&
2391 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2392 	    (tp->link_config.active_speed == SPEED_100 ||
2393 	     tp->link_config.active_speed == SPEED_1000)) {
2394 		u32 eeectl;
2395 
2396 		if (tp->link_config.active_speed == SPEED_1000)
2397 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2398 		else
2399 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2400 
2401 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2402 
2403 		tg3_eee_pull_config(tp, NULL);
2404 		if (tp->eee.eee_active)
2405 			tp->setlpicnt = 2;
2406 	}
2407 
2408 	if (!tp->setlpicnt) {
2409 		if (current_link_up &&
2410 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2411 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2412 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2413 		}
2414 
2415 		val = tr32(TG3_CPMU_EEE_MODE);
2416 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2417 	}
2418 }
2419 
2420 static void tg3_phy_eee_enable(struct tg3 *tp)
2421 {
2422 	u32 val;
2423 
2424 	if (tp->link_config.active_speed == SPEED_1000 &&
2425 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2426 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2427 	     tg3_flag(tp, 57765_CLASS)) &&
2428 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2429 		val = MII_TG3_DSP_TAP26_ALNOKO |
2430 		      MII_TG3_DSP_TAP26_RMRXSTO;
2431 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2432 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2433 	}
2434 
2435 	val = tr32(TG3_CPMU_EEE_MODE);
2436 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2437 }
2438 
2439 static int tg3_wait_macro_done(struct tg3 *tp)
2440 {
2441 	int limit = 100;
2442 
2443 	while (limit--) {
2444 		u32 tmp32;
2445 
2446 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2447 			if ((tmp32 & 0x1000) == 0)
2448 				break;
2449 		}
2450 	}
2451 	if (limit < 0)
2452 		return -EBUSY;
2453 
2454 	return 0;
2455 }
2456 
2457 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2458 {
2459 	static const u32 test_pat[4][6] = {
2460 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2461 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2462 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2463 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2464 	};
2465 	int chan;
2466 
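	/* Write the test pattern into each of the four DSP channels and
	 * read it back; any mismatch or macro timeout asks the caller
	 * (via *resetp) to reset the PHY and retry.
	 */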
2467 	for (chan = 0; chan < 4; chan++) {
2468 		int i;
2469 
2470 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2471 			     (chan * 0x2000) | 0x0200);
2472 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2473 
2474 		for (i = 0; i < 6; i++)
2475 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2476 				     test_pat[chan][i]);
2477 
2478 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2479 		if (tg3_wait_macro_done(tp)) {
2480 			*resetp = 1;
2481 			return -EBUSY;
2482 		}
2483 
2484 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2485 			     (chan * 0x2000) | 0x0200);
2486 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2487 		if (tg3_wait_macro_done(tp)) {
2488 			*resetp = 1;
2489 			return -EBUSY;
2490 		}
2491 
2492 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2493 		if (tg3_wait_macro_done(tp)) {
2494 			*resetp = 1;
2495 			return -EBUSY;
2496 		}
2497 
2498 		for (i = 0; i < 6; i += 2) {
2499 			u32 low, high;
2500 
2501 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2502 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2503 			    tg3_wait_macro_done(tp)) {
2504 				*resetp = 1;
2505 				return -EBUSY;
2506 			}
2507 			low &= 0x7fff;
2508 			high &= 0x000f;
2509 			if (low != test_pat[chan][i] ||
2510 			    high != test_pat[chan][i+1]) {
2511 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2512 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2513 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2514 
2515 				return -EBUSY;
2516 			}
2517 		}
2518 	}
2519 
2520 	return 0;
2521 }
2522 
2523 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2524 {
2525 	int chan;
2526 
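	/* Clear the test pattern out of all four DSP channels. */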
2527 	for (chan = 0; chan < 4; chan++) {
2528 		int i;
2529 
2530 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2531 			     (chan * 0x2000) | 0x0200);
2532 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2533 		for (i = 0; i < 6; i++)
2534 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2535 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2536 		if (tg3_wait_macro_done(tp))
2537 			return -EBUSY;
2538 	}
2539 
2540 	return 0;
2541 }
2542 
2543 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2544 {
2545 	u32 reg32, phy9_orig;
2546 	int retries, do_phy_reset, err;
2547 
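	/* Retry loop: reset the PHY, force 1000 Mbps full-duplex master
	 * mode and rewrite the DSP test pattern until it verifies, up to
	 * ten attempts.
	 */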
2548 	retries = 10;
2549 	do_phy_reset = 1;
2550 	do {
2551 		if (do_phy_reset) {
2552 			err = tg3_bmcr_reset(tp);
2553 			if (err)
2554 				return err;
2555 			do_phy_reset = 0;
2556 		}
2557 
2558 		/* Disable transmitter and interrupt.  */
2559 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2560 			continue;
2561 
2562 		reg32 |= 0x3000;
2563 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2564 
2565 		/* Set full-duplex, 1000 mbps.  */
2566 		tg3_writephy(tp, MII_BMCR,
2567 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2568 
2569 		/* Set to master mode.  */
2570 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2571 			continue;
2572 
2573 		tg3_writephy(tp, MII_CTRL1000,
2574 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2575 
2576 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2577 		if (err)
2578 			return err;
2579 
2580 		/* Block the PHY control access.  */
2581 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2582 
2583 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2584 		if (!err)
2585 			break;
2586 	} while (--retries);
2587 
2588 	err = tg3_phy_reset_chanpat(tp);
2589 	if (err)
2590 		return err;
2591 
2592 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2593 
2594 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2595 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2596 
2597 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2598 
2599 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2600 
2601 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2602 	if (err)
2603 		return err;
2604 
2605 	reg32 &= ~0x3000;
2606 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2607 
2608 	return 0;
2609 }
2610 
2611 static void tg3_carrier_off(struct tg3 *tp)
2612 {
2613 	netif_carrier_off(tp->dev);
2614 	tp->link_up = false;
2615 }
2616 
2617 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2618 {
2619 	if (tg3_flag(tp, ENABLE_ASF))
2620 		netdev_warn(tp->dev,
2621 			    "Management side-band traffic will be interrupted during phy settings change\n");
2622 }
2623 
2624 /* Reset the tigon3 PHY and reapply any chip-specific
2625  * workarounds.
2626  */
2627 static int tg3_phy_reset(struct tg3 *tp)
2628 {
2629 	u32 val, cpmuctrl;
2630 	int err;
2631 
2632 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2633 		val = tr32(GRC_MISC_CFG);
2634 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2635 		udelay(40);
2636 	}
2637 	err  = tg3_readphy(tp, MII_BMSR, &val);
2638 	err |= tg3_readphy(tp, MII_BMSR, &val);
2639 	if (err != 0)
2640 		return -EBUSY;
2641 
2642 	if (netif_running(tp->dev) && tp->link_up) {
2643 		netif_carrier_off(tp->dev);
2644 		tg3_link_report(tp);
2645 	}
2646 
2647 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2648 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2649 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2650 		err = tg3_phy_reset_5703_4_5(tp);
2651 		if (err)
2652 			return err;
2653 		goto out;
2654 	}
2655 
2656 	cpmuctrl = 0;
2657 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2658 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2659 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2660 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2661 			tw32(TG3_CPMU_CTRL,
2662 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2663 	}
2664 
2665 	err = tg3_bmcr_reset(tp);
2666 	if (err)
2667 		return err;
2668 
2669 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2670 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2671 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2672 
2673 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2674 	}
2675 
2676 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2677 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2678 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2679 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2680 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2681 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2682 			udelay(40);
2683 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2684 		}
2685 	}
2686 
2687 	if (tg3_flag(tp, 5717_PLUS) &&
2688 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2689 		return 0;
2690 
2691 	tg3_phy_apply_otp(tp);
2692 
2693 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2694 		tg3_phy_toggle_apd(tp, true);
2695 	else
2696 		tg3_phy_toggle_apd(tp, false);
2697 
2698 out:
2699 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2700 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2701 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2702 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2703 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2704 	}
2705 
2706 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2707 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2708 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2709 	}
2710 
2711 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2712 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2713 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2714 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2715 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2716 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2717 		}
2718 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2719 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2720 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2721 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2722 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2723 				tg3_writephy(tp, MII_TG3_TEST1,
2724 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2725 			} else
2726 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2727 
2728 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2729 		}
2730 	}
2731 
2732 	/* Set Extended packet length bit (bit 14) on all chips that
2733 	 * support jumbo frames. */
2734 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2735 		/* Cannot do read-modify-write on 5401 */
2736 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2737 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2738 		/* Set bit 14 with read-modify-write to preserve other bits */
2739 		err = tg3_phy_auxctl_read(tp,
2740 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2741 		if (!err)
2742 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2743 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2744 	}
2745 
2746 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2747 	 * jumbo frames transmission.
2748 	 */
2749 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2750 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2751 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2752 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2753 	}
2754 
2755 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2756 		/* adjust output voltage */
2757 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2758 	}
2759 
2760 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2761 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2762 
2763 	tg3_phy_toggle_automdix(tp, true);
2764 	tg3_phy_set_wirespeed(tp);
2765 	return 0;
2766 }
2767 
2768 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2769 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2770 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2771 					  TG3_GPIO_MSG_NEED_VAUX)
2772 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2773 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2774 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2775 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2776 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2777 
2778 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2779 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2780 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2781 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2782 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2783 
2784 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2785 {
2786 	u32 status, shift;
2787 
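	/* Each PCI function owns a slot, 4 bits apart, in the shared
	 * status word (APE GPIO_MSG on 5717/5719, CPMU_DRV_STATUS
	 * otherwise); update only this function's bits and return the
	 * new word.
	 */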
2788 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2789 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2790 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2791 	else
2792 		status = tr32(TG3_CPMU_DRV_STATUS);
2793 
2794 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2795 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2796 	status |= (newstat << shift);
2797 
2798 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2799 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2800 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2801 	else
2802 		tw32(TG3_CPMU_DRV_STATUS, status);
2803 
2804 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2805 }
2806 
2807 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2808 {
2809 	if (!tg3_flag(tp, IS_NIC))
2810 		return 0;
2811 
2812 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2813 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2814 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2815 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2816 			return -EIO;
2817 
2818 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2819 
2820 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2821 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2822 
2823 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2824 	} else {
2825 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2826 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2827 	}
2828 
2829 	return 0;
2830 }
2831 
2832 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2833 {
2834 	u32 grc_local_ctrl;
2835 
2836 	if (!tg3_flag(tp, IS_NIC) ||
2837 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2838 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2839 		return;
2840 
2841 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2842 
2843 	tw32_wait_f(GRC_LOCAL_CTRL,
2844 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2845 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2846 
2847 	tw32_wait_f(GRC_LOCAL_CTRL,
2848 		    grc_local_ctrl,
2849 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2850 
2851 	tw32_wait_f(GRC_LOCAL_CTRL,
2852 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2853 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2854 }
2855 
2856 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2857 {
2858 	if (!tg3_flag(tp, IS_NIC))
2859 		return;
2860 
2861 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2862 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2863 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2864 			    (GRC_LCLCTRL_GPIO_OE0 |
2865 			     GRC_LCLCTRL_GPIO_OE1 |
2866 			     GRC_LCLCTRL_GPIO_OE2 |
2867 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2868 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2869 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2870 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2871 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2872 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2873 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2874 				     GRC_LCLCTRL_GPIO_OE1 |
2875 				     GRC_LCLCTRL_GPIO_OE2 |
2876 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2877 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2878 				     tp->grc_local_ctrl;
2879 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2880 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2881 
2882 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2883 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2884 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2885 
2886 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2887 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2888 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2889 	} else {
2890 		u32 no_gpio2;
2891 		u32 grc_local_ctrl = 0;
2892 
2893 		/* Workaround to prevent overdrawing Amps. */
2894 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2895 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2896 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2897 				    grc_local_ctrl,
2898 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2899 		}
2900 
2901 		/* On 5753 and variants, GPIO2 cannot be used. */
2902 		no_gpio2 = tp->nic_sram_data_cfg &
2903 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2904 
2905 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2906 				  GRC_LCLCTRL_GPIO_OE1 |
2907 				  GRC_LCLCTRL_GPIO_OE2 |
2908 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2909 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2910 		if (no_gpio2) {
2911 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2912 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2913 		}
2914 		tw32_wait_f(GRC_LOCAL_CTRL,
2915 			    tp->grc_local_ctrl | grc_local_ctrl,
2916 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2917 
2918 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2919 
2920 		tw32_wait_f(GRC_LOCAL_CTRL,
2921 			    tp->grc_local_ctrl | grc_local_ctrl,
2922 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2923 
2924 		if (!no_gpio2) {
2925 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2926 			tw32_wait_f(GRC_LOCAL_CTRL,
2927 				    tp->grc_local_ctrl | grc_local_ctrl,
2928 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2929 		}
2930 	}
2931 }
2932 
2933 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2934 {
2935 	u32 msg = 0;
2936 
2937 	/* Serialize power state transitions */
2938 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2939 		return;
2940 
2941 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2942 		msg = TG3_GPIO_MSG_NEED_VAUX;
2943 
2944 	msg = tg3_set_function_status(tp, msg);
2945 
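	/* If any other function still reports a driver present, that
	 * function owns the power source; leave the GPIOs alone.
	 */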
2946 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2947 		goto done;
2948 
2949 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2950 		tg3_pwrsrc_switch_to_vaux(tp);
2951 	else
2952 		tg3_pwrsrc_die_with_vmain(tp);
2953 
2954 done:
2955 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2956 }
2957 
2958 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2959 {
2960 	bool need_vaux = false;
2961 
2962 	/* The GPIOs do something completely different on 57765. */
2963 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2964 		return;
2965 
2966 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2967 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2968 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2969 		tg3_frob_aux_power_5717(tp, include_wol ?
2970 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2971 		return;
2972 	}
2973 
2974 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2975 		struct net_device *dev_peer;
2976 
2977 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2978 
2979 		/* remove_one() may have been run on the peer. */
2980 		if (dev_peer) {
2981 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2982 
2983 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2984 				return;
2985 
2986 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2987 			    tg3_flag(tp_peer, ENABLE_ASF))
2988 				need_vaux = true;
2989 		}
2990 	}
2991 
2992 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2993 	    tg3_flag(tp, ENABLE_ASF))
2994 		need_vaux = true;
2995 
2996 	if (need_vaux)
2997 		tg3_pwrsrc_switch_to_vaux(tp);
2998 	else
2999 		tg3_pwrsrc_die_with_vmain(tp);
3000 }
3001 
3002 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3003 {
3004 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3005 		return 1;
3006 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3007 		if (speed != SPEED_10)
3008 			return 1;
3009 	} else if (speed == SPEED_10)
3010 		return 1;
3011 
3012 	return 0;
3013 }
3014 
3015 static bool tg3_phy_power_bug(struct tg3 *tp)
3016 {
3017 	switch (tg3_asic_rev(tp)) {
3018 	case ASIC_REV_5700:
3019 	case ASIC_REV_5704:
3020 		return true;
3021 	case ASIC_REV_5780:
3022 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3023 			return true;
3024 		return false;
3025 	case ASIC_REV_5717:
3026 		if (!tp->pci_fn)
3027 			return true;
3028 		return false;
3029 	case ASIC_REV_5719:
3030 	case ASIC_REV_5720:
3031 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3032 		    !tp->pci_fn)
3033 			return true;
3034 		return false;
3035 	}
3036 
3037 	return false;
3038 }
3039 
3040 static bool tg3_phy_led_bug(struct tg3 *tp)
3041 {
3042 	switch (tg3_asic_rev(tp)) {
3043 	case ASIC_REV_5719:
3044 	case ASIC_REV_5720:
3045 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3046 		    !tp->pci_fn)
3047 			return true;
3048 		return false;
3049 	}
3050 
3051 	return false;
3052 }
3053 
3054 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3055 {
3056 	u32 val;
3057 
3058 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3059 		return;
3060 
3061 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3062 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3063 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3064 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3065 
3066 			sg_dig_ctrl |=
3067 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3068 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3069 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3070 		}
3071 		return;
3072 	}
3073 
3074 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3075 		tg3_bmcr_reset(tp);
3076 		val = tr32(GRC_MISC_CFG);
3077 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3078 		udelay(40);
3079 		return;
3080 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3081 		u32 phytest;
3082 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3083 			u32 phy;
3084 
3085 			tg3_writephy(tp, MII_ADVERTISE, 0);
3086 			tg3_writephy(tp, MII_BMCR,
3087 				     BMCR_ANENABLE | BMCR_ANRESTART);
3088 
3089 			tg3_writephy(tp, MII_TG3_FET_TEST,
3090 				     phytest | MII_TG3_FET_SHADOW_EN);
3091 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3092 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3093 				tg3_writephy(tp,
3094 					     MII_TG3_FET_SHDW_AUXMODE4,
3095 					     phy);
3096 			}
3097 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3098 		}
3099 		return;
3100 	} else if (do_low_power) {
3101 		if (!tg3_phy_led_bug(tp))
3102 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3103 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3104 
3105 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3106 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3107 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3108 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3109 	}
3110 
3111 	/* The PHY should not be powered down on some chips because
3112 	 * of bugs.
3113 	 */
3114 	if (tg3_phy_power_bug(tp))
3115 		return;
3116 
3117 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3118 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3119 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3120 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3121 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3122 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3123 	}
3124 
3125 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3126 }
3127 
3128 /* tp->lock is held. */
3129 static int tg3_nvram_lock(struct tg3 *tp)
3130 {
3131 	if (tg3_flag(tp, NVRAM)) {
3132 		int i;
3133 
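		/* Arbitrate with the firmware for the NVRAM interface:
		 * request arbiter grant 1 and poll up to ~160 ms
		 * (8000 * 20 usec) for it before giving up.
		 */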
3134 		if (tp->nvram_lock_cnt == 0) {
3135 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3136 			for (i = 0; i < 8000; i++) {
3137 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3138 					break;
3139 				udelay(20);
3140 			}
3141 			if (i == 8000) {
3142 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3143 				return -ENODEV;
3144 			}
3145 		}
3146 		tp->nvram_lock_cnt++;
3147 	}
3148 	return 0;
3149 }
3150 
3151 /* tp->lock is held. */
3152 static void tg3_nvram_unlock(struct tg3 *tp)
3153 {
3154 	if (tg3_flag(tp, NVRAM)) {
3155 		if (tp->nvram_lock_cnt > 0)
3156 			tp->nvram_lock_cnt--;
3157 		if (tp->nvram_lock_cnt == 0)
3158 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3159 	}
3160 }
3161 
3162 /* tp->lock is held. */
3163 static void tg3_enable_nvram_access(struct tg3 *tp)
3164 {
3165 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3166 		u32 nvaccess = tr32(NVRAM_ACCESS);
3167 
3168 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3169 	}
3170 }
3171 
3172 /* tp->lock is held. */
3173 static void tg3_disable_nvram_access(struct tg3 *tp)
3174 {
3175 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3176 		u32 nvaccess = tr32(NVRAM_ACCESS);
3177 
3178 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3179 	}
3180 }
3181 
3182 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3183 					u32 offset, u32 *val)
3184 {
3185 	u32 tmp;
3186 	int i;
3187 
3188 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3189 		return -EINVAL;
3190 
3191 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3192 					EEPROM_ADDR_DEVID_MASK |
3193 					EEPROM_ADDR_READ);
3194 	tw32(GRC_EEPROM_ADDR,
3195 	     tmp |
3196 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3197 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3198 	      EEPROM_ADDR_ADDR_MASK) |
3199 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3200 
3201 	for (i = 0; i < 1000; i++) {
3202 		tmp = tr32(GRC_EEPROM_ADDR);
3203 
3204 		if (tmp & EEPROM_ADDR_COMPLETE)
3205 			break;
3206 		msleep(1);
3207 	}
3208 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3209 		return -EBUSY;
3210 
3211 	tmp = tr32(GRC_EEPROM_DATA);
3212 
3213 	/*
3214 	 * The data will always be opposite the native endian
3215 	 * format.  Perform a blind byteswap to compensate.
3216 	 */
3217 	*val = swab32(tmp);
3218 
3219 	return 0;
3220 }
3221 
3222 #define NVRAM_CMD_TIMEOUT 10000
3223 
3224 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3225 {
3226 	int i;
3227 
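	/* Kick off the command and poll for NVRAM_CMD_DONE; with
	 * usleep_range(10, 40) per iteration the total timeout is
	 * roughly 100-400 ms.
	 */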
3228 	tw32(NVRAM_CMD, nvram_cmd);
3229 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3230 		usleep_range(10, 40);
3231 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3232 			udelay(10);
3233 			break;
3234 		}
3235 	}
3236 
3237 	if (i == NVRAM_CMD_TIMEOUT)
3238 		return -EBUSY;
3239 
3240 	return 0;
3241 }
3242 
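/* Atmel AT45DB0X1B parts use page-based addressing: a linear NVRAM
 * offset is split into a page index (shifted to ATMEL_AT45DB0X1B_PAGE_POS)
 * plus an intra-page offset.  The two helpers below convert between the
 * linear and page-based forms.
 */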
3243 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3244 {
3245 	if (tg3_flag(tp, NVRAM) &&
3246 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3247 	    tg3_flag(tp, FLASH) &&
3248 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3249 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3250 
3251 		addr = ((addr / tp->nvram_pagesize) <<
3252 			ATMEL_AT45DB0X1B_PAGE_POS) +
3253 		       (addr % tp->nvram_pagesize);
3254 
3255 	return addr;
3256 }
3257 
3258 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3259 {
3260 	if (tg3_flag(tp, NVRAM) &&
3261 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3262 	    tg3_flag(tp, FLASH) &&
3263 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3264 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3265 
3266 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3267 			tp->nvram_pagesize) +
3268 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3269 
3270 	return addr;
3271 }
3272 
3273 /* NOTE: Data read in from NVRAM is byteswapped according to
3274  * the byteswapping settings for all other register accesses.
3275  * tg3 devices are BE devices, so on a BE machine, the data
3276  * returned will be exactly as it is seen in NVRAM.  On a LE
3277  * machine, the 32-bit value will be byteswapped.
3278  */
3279 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3280 {
3281 	int ret;
3282 
3283 	if (!tg3_flag(tp, NVRAM))
3284 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3285 
3286 	offset = tg3_nvram_phys_addr(tp, offset);
3287 
3288 	if (offset > NVRAM_ADDR_MSK)
3289 		return -EINVAL;
3290 
3291 	ret = tg3_nvram_lock(tp);
3292 	if (ret)
3293 		return ret;
3294 
3295 	tg3_enable_nvram_access(tp);
3296 
3297 	tw32(NVRAM_ADDR, offset);
3298 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3299 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3300 
3301 	if (ret == 0)
3302 		*val = tr32(NVRAM_RDDATA);
3303 
3304 	tg3_disable_nvram_access(tp);
3305 
3306 	tg3_nvram_unlock(tp);
3307 
3308 	return ret;
3309 }
3310 
3311 /* Ensures NVRAM data is in bytestream format. */
3312 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3313 {
3314 	u32 v;
3315 	int res = tg3_nvram_read(tp, offset, &v);
3316 	if (!res)
3317 		*val = cpu_to_be32(v);
3318 	return res;
3319 }
3320 
3321 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3322 				    u32 offset, u32 len, u8 *buf)
3323 {
3324 	int i, j, rc = 0;
3325 	u32 val;
3326 
3327 	for (i = 0; i < len; i += 4) {
3328 		u32 addr;
3329 		__be32 data;
3330 
3331 		addr = offset + i;
3332 
3333 		memcpy(&data, buf + i, 4);
3334 
3335 		/*
3336 		 * The SEEPROM interface expects the data to always be opposite
3337 		 * the native endian format.  We accomplish this by reversing
3338 		 * all the operations that would have been performed on the
3339 		 * data from a call to tg3_nvram_read_be32().
3340 		 */
3341 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3342 
3343 		val = tr32(GRC_EEPROM_ADDR);
3344 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3345 
3346 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3347 			EEPROM_ADDR_READ);
3348 		tw32(GRC_EEPROM_ADDR, val |
3349 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3350 			(addr & EEPROM_ADDR_ADDR_MASK) |
3351 			EEPROM_ADDR_START |
3352 			EEPROM_ADDR_WRITE);
3353 
3354 		for (j = 0; j < 1000; j++) {
3355 			val = tr32(GRC_EEPROM_ADDR);
3356 
3357 			if (val & EEPROM_ADDR_COMPLETE)
3358 				break;
3359 			msleep(1);
3360 		}
3361 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3362 			rc = -EBUSY;
3363 			break;
3364 		}
3365 	}
3366 
3367 	return rc;
3368 }
3369 
3370 /* offset and length are dword aligned */
3371 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3372 		u8 *buf)
3373 {
3374 	int ret = 0;
3375 	u32 pagesize = tp->nvram_pagesize;
3376 	u32 pagemask = pagesize - 1;
3377 	u32 nvram_cmd;
3378 	u8 *tmp;
3379 
3380 	tmp = kmalloc(pagesize, GFP_KERNEL);
3381 	if (tmp == NULL)
3382 		return -ENOMEM;
3383 
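	/* Unbuffered flash is written a full page at a time: read the
	 * page into tmp, merge in the caller's data, erase the page,
	 * then write it back one dword per command.
	 */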
3384 	while (len) {
3385 		int j;
3386 		u32 phy_addr, page_off, size;
3387 
3388 		phy_addr = offset & ~pagemask;
3389 
3390 		for (j = 0; j < pagesize; j += 4) {
3391 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3392 						  (__be32 *) (tmp + j));
3393 			if (ret)
3394 				break;
3395 		}
3396 		if (ret)
3397 			break;
3398 
3399 		page_off = offset & pagemask;
3400 		size = pagesize;
3401 		if (len < size)
3402 			size = len;
3403 
3404 		len -= size;
3405 
3406 		memcpy(tmp + page_off, buf, size);
3407 
3408 		offset = offset + (pagesize - page_off);
3409 
3410 		tg3_enable_nvram_access(tp);
3411 
3412 		/*
3413 		 * Before we can erase the flash page, we need
3414 		 * to issue a special "write enable" command.
3415 		 */
3416 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3417 
3418 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3419 			break;
3420 
3421 		/* Erase the target page */
3422 		tw32(NVRAM_ADDR, phy_addr);
3423 
3424 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3425 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3426 
3427 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3428 			break;
3429 
3430 		/* Issue another write enable to start the write. */
3431 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3432 
3433 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3434 			break;
3435 
3436 		for (j = 0; j < pagesize; j += 4) {
3437 			__be32 data;
3438 
3439 			data = *((__be32 *) (tmp + j));
3440 
3441 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3442 
3443 			tw32(NVRAM_ADDR, phy_addr + j);
3444 
3445 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3446 				NVRAM_CMD_WR;
3447 
3448 			if (j == 0)
3449 				nvram_cmd |= NVRAM_CMD_FIRST;
3450 			else if (j == (pagesize - 4))
3451 				nvram_cmd |= NVRAM_CMD_LAST;
3452 
3453 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3454 			if (ret)
3455 				break;
3456 		}
3457 		if (ret)
3458 			break;
3459 	}
3460 
3461 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3462 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3463 
3464 	kfree(tmp);
3465 
3466 	return ret;
3467 }
3468 
3469 /* offset and length are dword aligned */
3470 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3471 		u8 *buf)
3472 {
3473 	int i, ret = 0;
3474 
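	/* Write one dword per command, tagging NVRAM_CMD_FIRST at the
	 * start of each page (or of the transfer) and NVRAM_CMD_LAST at
	 * the end of each page or at the final dword.
	 */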
3475 	for (i = 0; i < len; i += 4, offset += 4) {
3476 		u32 page_off, phy_addr, nvram_cmd;
3477 		__be32 data;
3478 
3479 		memcpy(&data, buf + i, 4);
3480 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3481 
3482 		page_off = offset % tp->nvram_pagesize;
3483 
3484 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3485 
3486 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3487 
3488 		if (page_off == 0 || i == 0)
3489 			nvram_cmd |= NVRAM_CMD_FIRST;
3490 		if (page_off == (tp->nvram_pagesize - 4))
3491 			nvram_cmd |= NVRAM_CMD_LAST;
3492 
3493 		if (i == (len - 4))
3494 			nvram_cmd |= NVRAM_CMD_LAST;
3495 
3496 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3497 		    !tg3_flag(tp, FLASH) ||
3498 		    !tg3_flag(tp, 57765_PLUS))
3499 			tw32(NVRAM_ADDR, phy_addr);
3500 
3501 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3502 		    !tg3_flag(tp, 5755_PLUS) &&
3503 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3504 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3505 			u32 cmd;
3506 
3507 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3508 			ret = tg3_nvram_exec_cmd(tp, cmd);
3509 			if (ret)
3510 				break;
3511 		}
3512 		if (!tg3_flag(tp, FLASH)) {
3513 			/* We always do complete word writes to eeprom. */
3514 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3515 		}
3516 
3517 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3518 		if (ret)
3519 			break;
3520 	}
3521 	return ret;
3522 }
3523 
3524 /* offset and length are dword aligned */
3525 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3526 {
3527 	int ret;
3528 
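	/* When the part is write protected, deassert GPIO OUTPUT1 for
	 * the duration of the write and restore it afterwards.
	 */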
3529 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3530 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3531 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3532 		udelay(40);
3533 	}
3534 
3535 	if (!tg3_flag(tp, NVRAM)) {
3536 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3537 	} else {
3538 		u32 grc_mode;
3539 
3540 		ret = tg3_nvram_lock(tp);
3541 		if (ret)
3542 			return ret;
3543 
3544 		tg3_enable_nvram_access(tp);
3545 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3546 			tw32(NVRAM_WRITE1, 0x406);
3547 
3548 		grc_mode = tr32(GRC_MODE);
3549 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3550 
3551 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3552 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3553 				buf);
3554 		} else {
3555 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3556 				buf);
3557 		}
3558 
3559 		grc_mode = tr32(GRC_MODE);
3560 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3561 
3562 		tg3_disable_nvram_access(tp);
3563 		tg3_nvram_unlock(tp);
3564 	}
3565 
3566 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3567 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3568 		udelay(40);
3569 	}
3570 
3571 	return ret;
3572 }
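
/* The sequence above is deliberately symmetric: take the NVRAM
 * arbitration lock, enable host access, set the GRC write enable,
 * perform the write, then undo each step in reverse order. The
 * EEPROM_WRITE_PROT GPIO is dropped before and restored after the
 * whole operation, so the part is writable only for its duration.
 */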
3573 
3574 #define RX_CPU_SCRATCH_BASE	0x30000
3575 #define RX_CPU_SCRATCH_SIZE	0x04000
3576 #define TX_CPU_SCRATCH_BASE	0x34000
3577 #define TX_CPU_SCRATCH_SIZE	0x04000
3578 
3579 /* tp->lock is held. */
3580 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3581 {
3582 	int i;
3583 	const int iters = 10000;
3584 
3585 	for (i = 0; i < iters; i++) {
3586 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3587 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3588 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3589 			break;
3590 		if (pci_channel_offline(tp->pdev))
3591 			return -EBUSY;
3592 	}
3593 
3594 	return (i == iters) ? -EBUSY : 0;
3595 }
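
/* Note that the loop above re-issues the halt request on every
 * iteration and polls CPU_MODE until the halt bit reads back as set,
 * bailing out early if the PCI channel has gone offline (e.g. the
 * device was hot-removed or the slot was frozen by an error).
 */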
3596 
3597 /* tp->lock is held. */
3598 static int tg3_rxcpu_pause(struct tg3 *tp)
3599 {
3600 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3601 
3602 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3603 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3604 	udelay(10);
3605 
3606 	return rc;
3607 }
3608 
3609 /* tp->lock is held. */
3610 static int tg3_txcpu_pause(struct tg3 *tp)
3611 {
3612 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3613 }
3614 
3615 /* tp->lock is held. */
3616 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3617 {
3618 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3619 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3620 }
3621 
3622 /* tp->lock is held. */
3623 static void tg3_rxcpu_resume(struct tg3 *tp)
3624 {
3625 	tg3_resume_cpu(tp, RX_CPU_BASE);
3626 }
3627 
3628 /* tp->lock is held. */
3629 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3630 {
3631 	int rc;
3632 
3633 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3634 
3635 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3636 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3637 
3638 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3639 		return 0;
3640 	}
3641 	if (cpu_base == RX_CPU_BASE) {
3642 		rc = tg3_rxcpu_pause(tp);
3643 	} else {
3644 		/*
3645 		 * There is only an Rx CPU for the 5750 derivative in the
3646 		 * BCM4785.
3647 		 */
3648 		if (tg3_flag(tp, IS_SSB_CORE))
3649 			return 0;
3650 
3651 		rc = tg3_txcpu_pause(tp);
3652 	}
3653 
3654 	if (rc) {
3655 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3656 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3657 		return -ENODEV;
3658 	}
3659 
3660 	/* Clear firmware's nvram arbitration. */
3661 	if (tg3_flag(tp, NVRAM))
3662 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3663 	return 0;
3664 }
3665 
3666 static int tg3_fw_data_len(struct tg3 *tp,
3667 			   const struct tg3_firmware_hdr *fw_hdr)
3668 {
3669 	int fw_len;
3670 
3671 	/* Non-fragmented firmware images have one firmware header followed
3672 	 * by a contiguous chunk of data to be written. The length field in
3673 	 * that header is not the length of the data to be written but the
3674 	 * complete length of the bss. The data length is determined from
3675 	 * tp->fw->size minus the headers.
3676 	 *
3677 	 * Fragmented firmware images have a main header followed by multiple
3678 	 * fragments. Each fragment is identical to a non-fragmented image,
3679 	 * with a firmware header followed by a contiguous chunk of data. In
3680 	 * the main header, the length field is unused and set to 0xffffffff.
3681 	 * In each fragment header the length is the entire size of that
3682 	 * fragment, i.e. fragment data + header length. The data length is
3683 	 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3684 	 */
3685 	if (tp->fw_len == 0xffffffff)
3686 		fw_len = be32_to_cpu(fw_hdr->len);
3687 	else
3688 		fw_len = tp->fw->size;
3689 
3690 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3691 }
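
/* Illustrative arithmetic, assuming TG3_FW_HDR_LEN is the size of the
 * three-word { version, base_addr, len } header, i.e. 12 bytes:
 *
 *	non-fragmented blob with tp->fw->size = 4108:
 *		(4108 - 12) / 4 = 1024 data words
 *	fragment with be32_to_cpu(fw_hdr->len) = 268:
 *		(268 - 12) / 4 = 64 data words
 */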
3692 
3693 /* tp->lock is held. */
3694 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3695 				 u32 cpu_scratch_base, int cpu_scratch_size,
3696 				 const struct tg3_firmware_hdr *fw_hdr)
3697 {
3698 	int err, i;
3699 	void (*write_op)(struct tg3 *, u32, u32);
3700 	int total_len = tp->fw->size;
3701 
3702 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3703 		netdev_err(tp->dev,
3704 			   "%s: Trying to load TX cpu firmware on a 5705-class device\n",
3705 			   __func__);
3706 		return -EINVAL;
3707 	}
3708 
3709 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3710 		write_op = tg3_write_mem;
3711 	else
3712 		write_op = tg3_write_indirect_reg32;
3713 
3714 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3715 		/* It is possible that bootcode is still loading at this point.
3716 		 * Get the nvram lock first before halting the cpu.
3717 		 */
3718 		int lock_err = tg3_nvram_lock(tp);
3719 		err = tg3_halt_cpu(tp, cpu_base);
3720 		if (!lock_err)
3721 			tg3_nvram_unlock(tp);
3722 		if (err)
3723 			goto out;
3724 
3725 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3726 			write_op(tp, cpu_scratch_base + i, 0);
3727 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3728 		tw32(cpu_base + CPU_MODE,
3729 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3730 	} else {
3731 		/* Subtract additional main header for fragmented firmware and
3732 		 * advance to the first fragment
3733 		 */
3734 		total_len -= TG3_FW_HDR_LEN;
3735 		fw_hdr++;
3736 	}
3737 
3738 	do {
3739 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3740 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3741 			write_op(tp, cpu_scratch_base +
3742 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3743 				     (i * sizeof(u32)),
3744 				 be32_to_cpu(fw_data[i]));
3745 
3746 		total_len -= be32_to_cpu(fw_hdr->len);
3747 
3748 		/* Advance to next fragment */
3749 		fw_hdr = (struct tg3_firmware_hdr *)
3750 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3751 	} while (total_len > 0);
3752 
3753 	err = 0;
3754 
3755 out:
3756 	return err;
3757 }
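
/* For fragmented images the do-while loop above walks the fragments,
 * advancing fw_hdr by be32_to_cpu(fw_hdr->len) bytes (header plus
 * data) until total_len is exhausted. For non-fragmented images a
 * single pass appears to suffice because the header's len field holds
 * the full bss length, which is no smaller than the remaining blob.
 */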
3758 
3759 /* tp->lock is held. */
3760 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3761 {
3762 	int i;
3763 	const int iters = 5;
3764 
3765 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3766 	tw32_f(cpu_base + CPU_PC, pc);
3767 
3768 	for (i = 0; i < iters; i++) {
3769 		if (tr32(cpu_base + CPU_PC) == pc)
3770 			break;
3771 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3772 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3773 		tw32_f(cpu_base + CPU_PC, pc);
3774 		udelay(1000);
3775 	}
3776 
3777 	return (i == iters) ? -EBUSY : 0;
3778 }
3779 
3780 /* tp->lock is held. */
3781 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3782 {
3783 	const struct tg3_firmware_hdr *fw_hdr;
3784 	int err;
3785 
3786 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3787 
3788 	/* The firmware blob starts with version numbers, followed by
3789 	 * start address and length. The length field holds the complete
3790 	 * length: end_address_of_bss - start_address_of_text. The remainder
3791 	 * is the blob to be loaded contiguously from the start
3792 	 * address. */
3793 
3794 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3795 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3796 				    fw_hdr);
3797 	if (err)
3798 		return err;
3799 
3800 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3801 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3802 				    fw_hdr);
3803 	if (err)
3804 		return err;
3805 
3806 	/* Now startup only the RX cpu. */
3807 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3808 				       be32_to_cpu(fw_hdr->base_addr));
3809 	if (err) {
3810 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3811 			   "should be %08x\n", __func__,
3812 			   tr32(RX_CPU_BASE + CPU_PC),
3813 				be32_to_cpu(fw_hdr->base_addr));
3814 		return -ENODEV;
3815 	}
3816 
3817 	tg3_rxcpu_resume(tp);
3818 
3819 	return 0;
3820 }
3821 
3822 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3823 {
3824 	const int iters = 1000;
3825 	int i;
3826 	u32 val;
3827 
3828 	/* Wait for boot code to complete initialization and enter service
3829 	 * loop. It is then safe to download service patches.
3830 	 */
3831 	for (i = 0; i < iters; i++) {
3832 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3833 			break;
3834 
3835 		udelay(10);
3836 	}
3837 
3838 	if (i == iters) {
3839 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3840 		return -EBUSY;
3841 	}
3842 
3843 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3844 	if (val & 0xff) {
3845 		netdev_warn(tp->dev,
3846 			    "Other patches exist. Not downloading EEE patch\n");
3847 		return -EEXIST;
3848 	}
3849 
3850 	return 0;
3851 }
3852 
3853 /* tp->lock is held. */
3854 static void tg3_load_57766_firmware(struct tg3 *tp)
3855 {
3856 	struct tg3_firmware_hdr *fw_hdr;
3857 
3858 	if (!tg3_flag(tp, NO_NVRAM))
3859 		return;
3860 
3861 	if (tg3_validate_rxcpu_state(tp))
3862 		return;
3863 
3864 	if (!tp->fw)
3865 		return;
3866 
3867 	/* This firmware blob has a different format from older firmware
3868 	 * releases, as described below. The main difference is that the
3869 	 * data is fragmented and written to non-contiguous locations.
3870 	 *
3871 	 * In the beginning we have a firmware header identical to other
3872 	 * firmware images, consisting of version, base addr and length. The
3873 	 * length here is unused and set to 0xffffffff.
3874 	 *
3875 	 * This is followed by a series of firmware fragments which are
3876 	 * individually identical to previous firmware, i.e. each has a
3877 	 * firmware header followed by the data for that fragment. The
3878 	 * version field of the individual fragment header is unused.
3879 	 */
3880 
3881 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3882 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3883 		return;
3884 
3885 	if (tg3_rxcpu_pause(tp))
3886 		return;
3887 
3888 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3889 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3890 
3891 	tg3_rxcpu_resume(tp);
3892 }
3893 
3894 /* tp->lock is held. */
3895 static int tg3_load_tso_firmware(struct tg3 *tp)
3896 {
3897 	const struct tg3_firmware_hdr *fw_hdr;
3898 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3899 	int err;
3900 
3901 	if (!tg3_flag(tp, FW_TSO))
3902 		return 0;
3903 
3904 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3905 
3906 	/* The firmware blob starts with version numbers, followed by
3907 	 * start address and length. The length field holds the complete
3908 	 * length: end_address_of_bss - start_address_of_text. The remainder
3909 	 * is the blob to be loaded contiguously from the start
3910 	 * address. */
3911 
3912 	cpu_scratch_size = tp->fw_len;
3913 
3914 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3915 		cpu_base = RX_CPU_BASE;
3916 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3917 	} else {
3918 		cpu_base = TX_CPU_BASE;
3919 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3920 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3921 	}
3922 
3923 	err = tg3_load_firmware_cpu(tp, cpu_base,
3924 				    cpu_scratch_base, cpu_scratch_size,
3925 				    fw_hdr);
3926 	if (err)
3927 		return err;
3928 
3929 	/* Now startup the cpu. */
3930 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3931 				       be32_to_cpu(fw_hdr->base_addr));
3932 	if (err) {
3933 		netdev_err(tp->dev,
3934 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3935 			   __func__, tr32(cpu_base + CPU_PC),
3936 			   be32_to_cpu(fw_hdr->base_addr));
3937 		return -ENODEV;
3938 	}
3939 
3940 	tg3_resume_cpu(tp, cpu_base);
3941 	return 0;
3942 }
3943 
3944 /* tp->lock is held. */
3945 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3946 				   int index)
3947 {
3948 	u32 addr_high, addr_low;
3949 
3950 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3951 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3952 		    (mac_addr[4] <<  8) | mac_addr[5]);
3953 
3954 	if (index < 4) {
3955 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3956 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3957 	} else {
3958 		index -= 4;
3959 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3960 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3961 	}
3962 }
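
/* Register packing example: for a (hypothetical) station address
 * 00:10:18:aa:bb:cc the code above produces
 *
 *	addr_high = 0x00000010;		(mac_addr[0..1])
 *	addr_low  = 0x18aabbcc;		(mac_addr[2..5])
 *
 * Slots 0-3 live at MAC_ADDR_*, slots 4 and up at MAC_EXTADDR_*,
 * each slot occupying an 8-byte stride of HIGH/LOW registers.
 */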
3963 
3964 /* tp->lock is held. */
3965 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3966 {
3967 	u32 addr_high;
3968 	int i;
3969 
3970 	for (i = 0; i < 4; i++) {
3971 		if (i == 1 && skip_mac_1)
3972 			continue;
3973 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3974 	}
3975 
3976 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3977 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3978 		for (i = 4; i < 16; i++)
3979 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3980 	}
3981 
3982 	addr_high = (tp->dev->dev_addr[0] +
3983 		     tp->dev->dev_addr[1] +
3984 		     tp->dev->dev_addr[2] +
3985 		     tp->dev->dev_addr[3] +
3986 		     tp->dev->dev_addr[4] +
3987 		     tp->dev->dev_addr[5]) &
3988 		TX_BACKOFF_SEED_MASK;
3989 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3990 }
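
/* The backoff seed is simply the byte-wise sum of the station address,
 * masked to the width of the seed register. For the hypothetical
 * address 00:10:18:aa:bb:cc above:
 *
 *	0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259
 *
 * so (0x259 & TX_BACKOFF_SEED_MASK) is written to MAC_TX_BACKOFF_SEED.
 */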
3991 
3992 static void tg3_enable_register_access(struct tg3 *tp)
3993 {
3994 	/*
3995 	 * Make sure register accesses (indirect or otherwise) will function
3996 	 * correctly.
3997 	 */
3998 	pci_write_config_dword(tp->pdev,
3999 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4000 }
4001 
4002 static int tg3_power_up(struct tg3 *tp)
4003 {
4004 	int err;
4005 
4006 	tg3_enable_register_access(tp);
4007 
4008 	err = pci_set_power_state(tp->pdev, PCI_D0);
4009 	if (!err) {
4010 		/* Switch out of Vaux if it is a NIC */
4011 		tg3_pwrsrc_switch_to_vmain(tp);
4012 	} else {
4013 		netdev_err(tp->dev, "Transition to D0 failed\n");
4014 	}
4015 
4016 	return err;
4017 }
4018 
4019 static int tg3_setup_phy(struct tg3 *, bool);
4020 
4021 static int tg3_power_down_prepare(struct tg3 *tp)
4022 {
4023 	u32 misc_host_ctrl;
4024 	bool device_should_wake, do_low_power;
4025 
4026 	tg3_enable_register_access(tp);
4027 
4028 	/* Restore the CLKREQ setting. */
4029 	if (tg3_flag(tp, CLKREQ_BUG))
4030 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4031 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4032 
4033 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4034 	tw32(TG3PCI_MISC_HOST_CTRL,
4035 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4036 
4037 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4038 			     tg3_flag(tp, WOL_ENABLE);
4039 
4040 	if (tg3_flag(tp, USE_PHYLIB)) {
4041 		do_low_power = false;
4042 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4043 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4044 			__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4045 			struct phy_device *phydev;
4046 			u32 phyid;
4047 
4048 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4049 
4050 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4051 
4052 			tp->link_config.speed = phydev->speed;
4053 			tp->link_config.duplex = phydev->duplex;
4054 			tp->link_config.autoneg = phydev->autoneg;
4055 			ethtool_convert_link_mode_to_legacy_u32(
4056 				&tp->link_config.advertising,
4057 				phydev->advertising);
4058 
4059 			linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4060 			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4061 					 advertising);
4062 			linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4063 					 advertising);
4064 			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4065 					 advertising);
4066 
4067 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4068 				if (tg3_flag(tp, WOL_SPEED_100MB)) {
4069 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4070 							 advertising);
4071 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4072 							 advertising);
4073 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4074 							 advertising);
4075 				} else {
4076 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4077 							 advertising);
4078 				}
4079 			}
4080 
4081 			linkmode_copy(phydev->advertising, advertising);
4082 			phy_start_aneg(phydev);
4083 
4084 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4085 			if (phyid != PHY_ID_BCMAC131) {
4086 				phyid &= PHY_BCM_OUI_MASK;
4087 				if (phyid == PHY_BCM_OUI_1 ||
4088 				    phyid == PHY_BCM_OUI_2 ||
4089 				    phyid == PHY_BCM_OUI_3)
4090 					do_low_power = true;
4091 			}
4092 		}
4093 	} else {
4094 		do_low_power = true;
4095 
4096 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4097 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4098 
4099 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4100 			tg3_setup_phy(tp, false);
4101 	}
4102 
4103 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4104 		u32 val;
4105 
4106 		val = tr32(GRC_VCPU_EXT_CTRL);
4107 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4108 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4109 		int i;
4110 		u32 val;
4111 
4112 		for (i = 0; i < 200; i++) {
4113 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4114 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4115 				break;
4116 			msleep(1);
4117 		}
4118 	}
4119 	if (tg3_flag(tp, WOL_CAP))
4120 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4121 						     WOL_DRV_STATE_SHUTDOWN |
4122 						     WOL_DRV_WOL |
4123 						     WOL_SET_MAGIC_PKT);
4124 
4125 	if (device_should_wake) {
4126 		u32 mac_mode;
4127 
4128 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4129 			if (do_low_power &&
4130 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4131 				tg3_phy_auxctl_write(tp,
4132 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4133 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4134 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4135 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4136 				udelay(40);
4137 			}
4138 
4139 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4140 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4141 			else if (tp->phy_flags &
4142 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4143 				if (tp->link_config.active_speed == SPEED_1000)
4144 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4145 				else
4146 					mac_mode = MAC_MODE_PORT_MODE_MII;
4147 			} else
4148 				mac_mode = MAC_MODE_PORT_MODE_MII;
4149 
4150 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4151 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4152 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4153 					     SPEED_100 : SPEED_10;
4154 				if (tg3_5700_link_polarity(tp, speed))
4155 					mac_mode |= MAC_MODE_LINK_POLARITY;
4156 				else
4157 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4158 			}
4159 		} else {
4160 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4161 		}
4162 
4163 		if (!tg3_flag(tp, 5750_PLUS))
4164 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4165 
4166 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4167 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4168 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4169 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4170 
4171 		if (tg3_flag(tp, ENABLE_APE))
4172 			mac_mode |= MAC_MODE_APE_TX_EN |
4173 				    MAC_MODE_APE_RX_EN |
4174 				    MAC_MODE_TDE_ENABLE;
4175 
4176 		tw32_f(MAC_MODE, mac_mode);
4177 		udelay(100);
4178 
4179 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4180 		udelay(10);
4181 	}
4182 
4183 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4184 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4185 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4186 		u32 base_val;
4187 
4188 		base_val = tp->pci_clock_ctrl;
4189 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4190 			     CLOCK_CTRL_TXCLK_DISABLE);
4191 
4192 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4193 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4194 	} else if (tg3_flag(tp, 5780_CLASS) ||
4195 		   tg3_flag(tp, CPMU_PRESENT) ||
4196 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4197 		/* do nothing */
4198 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4199 		u32 newbits1, newbits2;
4200 
4201 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4202 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4203 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4204 				    CLOCK_CTRL_TXCLK_DISABLE |
4205 				    CLOCK_CTRL_ALTCLK);
4206 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4207 		} else if (tg3_flag(tp, 5705_PLUS)) {
4208 			newbits1 = CLOCK_CTRL_625_CORE;
4209 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4210 		} else {
4211 			newbits1 = CLOCK_CTRL_ALTCLK;
4212 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4213 		}
4214 
4215 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4216 			    40);
4217 
4218 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4219 			    40);
4220 
4221 		if (!tg3_flag(tp, 5705_PLUS)) {
4222 			u32 newbits3;
4223 
4224 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4225 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4226 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4227 					    CLOCK_CTRL_TXCLK_DISABLE |
4228 					    CLOCK_CTRL_44MHZ_CORE);
4229 			} else {
4230 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4231 			}
4232 
4233 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4234 				    tp->pci_clock_ctrl | newbits3, 40);
4235 		}
4236 	}
4237 
4238 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4239 		tg3_power_down_phy(tp, do_low_power);
4240 
4241 	tg3_frob_aux_power(tp, true);
4242 
4243 	/* Workaround for unstable PLL clock */
4244 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4245 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4246 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4247 		u32 val = tr32(0x7d00);
4248 
4249 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4250 		tw32(0x7d00, val);
4251 		if (!tg3_flag(tp, ENABLE_ASF)) {
4252 			int err;
4253 
4254 			err = tg3_nvram_lock(tp);
4255 			tg3_halt_cpu(tp, RX_CPU_BASE);
4256 			if (!err)
4257 				tg3_nvram_unlock(tp);
4258 		}
4259 	}
4260 
4261 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4262 
4263 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4264 
4265 	return 0;
4266 }
4267 
4268 static void tg3_power_down(struct tg3 *tp)
4269 {
4270 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4271 	pci_set_power_state(tp->pdev, PCI_D3hot);
4272 }
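
/* tg3_power_down() only arms PME from D3 according to the WOL flag and
 * drops the device to D3hot; the actual WOL programming (magic packet
 * enable, MAC mode) was already done in tg3_power_down_prepare().
 */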
4273 
4274 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4275 {
4276 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4277 	case MII_TG3_AUX_STAT_10HALF:
4278 		*speed = SPEED_10;
4279 		*duplex = DUPLEX_HALF;
4280 		break;
4281 
4282 	case MII_TG3_AUX_STAT_10FULL:
4283 		*speed = SPEED_10;
4284 		*duplex = DUPLEX_FULL;
4285 		break;
4286 
4287 	case MII_TG3_AUX_STAT_100HALF:
4288 		*speed = SPEED_100;
4289 		*duplex = DUPLEX_HALF;
4290 		break;
4291 
4292 	case MII_TG3_AUX_STAT_100FULL:
4293 		*speed = SPEED_100;
4294 		*duplex = DUPLEX_FULL;
4295 		break;
4296 
4297 	case MII_TG3_AUX_STAT_1000HALF:
4298 		*speed = SPEED_1000;
4299 		*duplex = DUPLEX_HALF;
4300 		break;
4301 
4302 	case MII_TG3_AUX_STAT_1000FULL:
4303 		*speed = SPEED_1000;
4304 		*duplex = DUPLEX_FULL;
4305 		break;
4306 
4307 	default:
4308 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4309 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4310 				 SPEED_10;
4311 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4312 				  DUPLEX_HALF;
4313 			break;
4314 		}
4315 		*speed = SPEED_UNKNOWN;
4316 		*duplex = DUPLEX_UNKNOWN;
4317 		break;
4318 	}
4319 }
4320 
4321 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4322 {
4323 	int err = 0;
4324 	u32 val, new_adv;
4325 
4326 	new_adv = ADVERTISE_CSMA;
4327 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4328 	new_adv |= mii_advertise_flowctrl(flowctrl);
4329 
4330 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4331 	if (err)
4332 		goto done;
4333 
4334 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4335 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4336 
4337 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4338 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4339 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4340 
4341 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4342 		if (err)
4343 			goto done;
4344 	}
4345 
4346 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4347 		goto done;
4348 
4349 	tw32(TG3_CPMU_EEE_MODE,
4350 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4351 
4352 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4353 	if (!err) {
4354 		u32 err2;
4355 
4356 		val = 0;
4357 		/* Advertise 100-BaseTX EEE ability */
4358 		if (advertise & ADVERTISED_100baseT_Full)
4359 			val |= MDIO_AN_EEE_ADV_100TX;
4360 		/* Advertise 1000-BaseT EEE ability */
4361 		if (advertise & ADVERTISED_1000baseT_Full)
4362 			val |= MDIO_AN_EEE_ADV_1000T;
4363 
4364 		if (!tp->eee.eee_enabled) {
4365 			val = 0;
4366 			tp->eee.advertised = 0;
4367 		} else {
4368 			tp->eee.advertised = advertise &
4369 					     (ADVERTISED_100baseT_Full |
4370 					      ADVERTISED_1000baseT_Full);
4371 		}
4372 
4373 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4374 		if (err)
4375 			val = 0;
4376 
4377 		switch (tg3_asic_rev(tp)) {
4378 		case ASIC_REV_5717:
4379 		case ASIC_REV_57765:
4380 		case ASIC_REV_57766:
4381 		case ASIC_REV_5719:
4382 			/* If we advertised any EEE abilities above... */
4383 			if (val)
4384 				val = MII_TG3_DSP_TAP26_ALNOKO |
4385 				      MII_TG3_DSP_TAP26_RMRXSTO |
4386 				      MII_TG3_DSP_TAP26_OPCSINPT;
4387 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4388 			fallthrough;
4389 		case ASIC_REV_5720:
4390 		case ASIC_REV_5762:
4391 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4392 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4393 						 MII_TG3_DSP_CH34TP2_HIBW01);
4394 		}
4395 
4396 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4397 		if (!err)
4398 			err = err2;
4399 	}
4400 
4401 done:
4402 	return err;
4403 }
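
/* Ordering note on the EEE configuration above: LPI is disabled in the
 * CPMU before the advertisement register is touched, and the Clause 45
 * MDIO_AN_EEE_ADV register is reached through tg3_phy_cl45_write().
 * When EEE is administratively off, val is forced to 0 so the
 * abilities are explicitly de-advertised rather than left stale.
 */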
4404 
4405 static void tg3_phy_copper_begin(struct tg3 *tp)
4406 {
4407 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4408 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4409 		u32 adv, fc;
4410 
4411 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4412 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4413 			adv = ADVERTISED_10baseT_Half |
4414 			      ADVERTISED_10baseT_Full;
4415 			if (tg3_flag(tp, WOL_SPEED_100MB))
4416 				adv |= ADVERTISED_100baseT_Half |
4417 				       ADVERTISED_100baseT_Full;
4418 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4419 				if (!(tp->phy_flags &
4420 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4421 					adv |= ADVERTISED_1000baseT_Half;
4422 				adv |= ADVERTISED_1000baseT_Full;
4423 			}
4424 
4425 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4426 		} else {
4427 			adv = tp->link_config.advertising;
4428 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4429 				adv &= ~(ADVERTISED_1000baseT_Half |
4430 					 ADVERTISED_1000baseT_Full);
4431 
4432 			fc = tp->link_config.flowctrl;
4433 		}
4434 
4435 		tg3_phy_autoneg_cfg(tp, adv, fc);
4436 
4437 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4438 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4439 			/* Normally during power down we want to autonegotiate
4440 			 * the lowest possible speed for WOL. However, to avoid
4441 			 * link flap, we leave it untouched.
4442 			 */
4443 			return;
4444 		}
4445 
4446 		tg3_writephy(tp, MII_BMCR,
4447 			     BMCR_ANENABLE | BMCR_ANRESTART);
4448 	} else {
4449 		int i;
4450 		u32 bmcr, orig_bmcr;
4451 
4452 		tp->link_config.active_speed = tp->link_config.speed;
4453 		tp->link_config.active_duplex = tp->link_config.duplex;
4454 
4455 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4456 			/* With autoneg disabled, 5715 only links up when the
4457 			 * advertisement register has the configured speed
4458 			 * enabled.
4459 			 */
4460 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4461 		}
4462 
4463 		bmcr = 0;
4464 		switch (tp->link_config.speed) {
4465 		default:
4466 		case SPEED_10:
4467 			break;
4468 
4469 		case SPEED_100:
4470 			bmcr |= BMCR_SPEED100;
4471 			break;
4472 
4473 		case SPEED_1000:
4474 			bmcr |= BMCR_SPEED1000;
4475 			break;
4476 		}
4477 
4478 		if (tp->link_config.duplex == DUPLEX_FULL)
4479 			bmcr |= BMCR_FULLDPLX;
4480 
4481 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4482 		    (bmcr != orig_bmcr)) {
4483 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4484 			for (i = 0; i < 1500; i++) {
4485 				u32 tmp;
4486 
4487 				udelay(10);
4488 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4489 				    tg3_readphy(tp, MII_BMSR, &tmp))
4490 					continue;
4491 				if (!(tmp & BMSR_LSTATUS)) {
4492 					udelay(40);
4493 					break;
4494 				}
4495 			}
4496 			tg3_writephy(tp, MII_BMCR, bmcr);
4497 			udelay(40);
4498 		}
4499 	}
4500 }
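
/* In the forced-speed path above, BMCR_LOOPBACK is written first to
 * take the link down deliberately; the code then polls BMSR (up to
 * 1500 * 10 usec) for BMSR_LSTATUS to clear before programming the
 * real speed/duplex into BMCR, presumably to avoid a transient link
 * at the old settings.
 */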
4501 
4502 static int tg3_phy_pull_config(struct tg3 *tp)
4503 {
4504 	int err;
4505 	u32 val;
4506 
4507 	err = tg3_readphy(tp, MII_BMCR, &val);
4508 	if (err)
4509 		goto done;
4510 
4511 	if (!(val & BMCR_ANENABLE)) {
4512 		tp->link_config.autoneg = AUTONEG_DISABLE;
4513 		tp->link_config.advertising = 0;
4514 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4515 
4516 		err = -EIO;
4517 
4518 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4519 		case 0:
4520 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4521 				goto done;
4522 
4523 			tp->link_config.speed = SPEED_10;
4524 			break;
4525 		case BMCR_SPEED100:
4526 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4527 				goto done;
4528 
4529 			tp->link_config.speed = SPEED_100;
4530 			break;
4531 		case BMCR_SPEED1000:
4532 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4533 				tp->link_config.speed = SPEED_1000;
4534 				break;
4535 			}
4536 			fallthrough;
4537 		default:
4538 			goto done;
4539 		}
4540 
4541 		if (val & BMCR_FULLDPLX)
4542 			tp->link_config.duplex = DUPLEX_FULL;
4543 		else
4544 			tp->link_config.duplex = DUPLEX_HALF;
4545 
4546 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4547 
4548 		err = 0;
4549 		goto done;
4550 	}
4551 
4552 	tp->link_config.autoneg = AUTONEG_ENABLE;
4553 	tp->link_config.advertising = ADVERTISED_Autoneg;
4554 	tg3_flag_set(tp, PAUSE_AUTONEG);
4555 
4556 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4557 		u32 adv;
4558 
4559 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4560 		if (err)
4561 			goto done;
4562 
4563 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4564 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4565 
4566 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4567 	} else {
4568 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4569 	}
4570 
4571 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4572 		u32 adv;
4573 
4574 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4575 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4576 			if (err)
4577 				goto done;
4578 
4579 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4580 		} else {
4581 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4582 			if (err)
4583 				goto done;
4584 
4585 			adv = tg3_decode_flowctrl_1000X(val);
4586 			tp->link_config.flowctrl = adv;
4587 
4588 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4589 			adv = mii_adv_to_ethtool_adv_x(val);
4590 		}
4591 
4592 		tp->link_config.advertising |= adv;
4593 	}
4594 
4595 done:
4596 	return err;
4597 }
4598 
4599 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4600 {
4601 	int err;
4602 
4603 	/* Turn off tap power management. */
4604 	/* Set Extended packet length bit */
4605 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4606 
4607 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4608 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4609 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4610 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4611 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4612 
4613 	udelay(40);
4614 
4615 	return err;
4616 }
4617 
4618 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4619 {
4620 	struct ethtool_eee eee;
4621 
4622 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4623 		return true;
4624 
4625 	tg3_eee_pull_config(tp, &eee);
4626 
4627 	if (tp->eee.eee_enabled) {
4628 		if (tp->eee.advertised != eee.advertised ||
4629 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4630 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4631 			return false;
4632 	} else {
4633 		/* EEE is disabled but we're advertising */
4634 		if (eee.advertised)
4635 			return false;
4636 	}
4637 
4638 	return true;
4639 }
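
/* This check compares the driver's requested EEE state (tp->eee)
 * against what tg3_eee_pull_config() reads back from the PHY. Any
 * mismatch makes the autoneg configuration "not ok", which drives the
 * reset-and-renegotiate path in tg3_setup_copper_phy().
 */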
4640 
4641 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4642 {
4643 	u32 advmsk, tgtadv, advertising;
4644 
4645 	advertising = tp->link_config.advertising;
4646 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4647 
4648 	advmsk = ADVERTISE_ALL;
4649 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4650 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4651 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4652 	}
4653 
4654 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4655 		return false;
4656 
4657 	if ((*lcladv & advmsk) != tgtadv)
4658 		return false;
4659 
4660 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4661 		u32 tg3_ctrl;
4662 
4663 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4664 
4665 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4666 			return false;
4667 
4668 		if (tgtadv &&
4669 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4670 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4671 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4672 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4673 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4674 		} else {
4675 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4676 		}
4677 
4678 		if (tg3_ctrl != tgtadv)
4679 			return false;
4680 	}
4681 
4682 	return true;
4683 }
4684 
4685 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4686 {
4687 	u32 lpeth = 0;
4688 
4689 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4690 		u32 val;
4691 
4692 		if (tg3_readphy(tp, MII_STAT1000, &val))
4693 			return false;
4694 
4695 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4696 	}
4697 
4698 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4699 		return false;
4700 
4701 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4702 	tp->link_config.rmt_adv = lpeth;
4703 
4704 	return true;
4705 }
4706 
4707 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4708 {
4709 	if (curr_link_up != tp->link_up) {
4710 		if (curr_link_up) {
4711 			netif_carrier_on(tp->dev);
4712 		} else {
4713 			netif_carrier_off(tp->dev);
4714 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4715 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4716 		}
4717 
4718 		tg3_link_report(tp);
4719 		return true;
4720 	}
4721 
4722 	return false;
4723 }
4724 
4725 static void tg3_clear_mac_status(struct tg3 *tp)
4726 {
4727 	tw32(MAC_EVENT, 0);
4728 
4729 	tw32_f(MAC_STATUS,
4730 	       MAC_STATUS_SYNC_CHANGED |
4731 	       MAC_STATUS_CFG_CHANGED |
4732 	       MAC_STATUS_MI_COMPLETION |
4733 	       MAC_STATUS_LNKSTATE_CHANGED);
4734 	udelay(40);
4735 }
4736 
4737 static void tg3_setup_eee(struct tg3 *tp)
4738 {
4739 	u32 val;
4740 
4741 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4742 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4743 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4744 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4745 
4746 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4747 
4748 	tw32_f(TG3_CPMU_EEE_CTRL,
4749 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4750 
4751 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4752 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4753 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4754 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4755 
4756 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4757 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4758 
4759 	if (tg3_flag(tp, ENABLE_APE))
4760 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4761 
4762 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4763 
4764 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4765 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4766 	       (tp->eee.tx_lpi_timer & 0xffff));
4767 
4768 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4769 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4770 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4771 }
4772 
4773 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4774 {
4775 	bool current_link_up;
4776 	u32 bmsr, val;
4777 	u32 lcl_adv, rmt_adv;
4778 	u32 current_speed;
4779 	u8 current_duplex;
4780 	int i, err;
4781 
4782 	tg3_clear_mac_status(tp);
4783 
4784 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4785 		tw32_f(MAC_MI_MODE,
4786 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4787 		udelay(80);
4788 	}
4789 
4790 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4791 
4792 	/* Some third-party PHYs need to be reset on link going
4793 	 * down.
4794 	 */
4795 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4796 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4797 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4798 	    tp->link_up) {
4799 		tg3_readphy(tp, MII_BMSR, &bmsr);
4800 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4801 		    !(bmsr & BMSR_LSTATUS))
4802 			force_reset = true;
4803 	}
4804 	if (force_reset)
4805 		tg3_phy_reset(tp);
4806 
4807 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4808 		tg3_readphy(tp, MII_BMSR, &bmsr);
4809 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4810 		    !tg3_flag(tp, INIT_COMPLETE))
4811 			bmsr = 0;
4812 
4813 		if (!(bmsr & BMSR_LSTATUS)) {
4814 			err = tg3_init_5401phy_dsp(tp);
4815 			if (err)
4816 				return err;
4817 
4818 			tg3_readphy(tp, MII_BMSR, &bmsr);
4819 			for (i = 0; i < 1000; i++) {
4820 				udelay(10);
4821 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4822 				    (bmsr & BMSR_LSTATUS)) {
4823 					udelay(40);
4824 					break;
4825 				}
4826 			}
4827 
4828 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4829 			    TG3_PHY_REV_BCM5401_B0 &&
4830 			    !(bmsr & BMSR_LSTATUS) &&
4831 			    tp->link_config.active_speed == SPEED_1000) {
4832 				err = tg3_phy_reset(tp);
4833 				if (!err)
4834 					err = tg3_init_5401phy_dsp(tp);
4835 				if (err)
4836 					return err;
4837 			}
4838 		}
4839 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4840 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4841 		/* 5701 {A0,B0} CRC bug workaround */
4842 		tg3_writephy(tp, 0x15, 0x0a75);
4843 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4844 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4845 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4846 	}
4847 
4848 	/* Clear pending interrupts... */
4849 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4850 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4851 
4852 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4853 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4854 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4855 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4856 
4857 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4858 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4859 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4860 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4861 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4862 		else
4863 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4864 	}
4865 
4866 	current_link_up = false;
4867 	current_speed = SPEED_UNKNOWN;
4868 	current_duplex = DUPLEX_UNKNOWN;
4869 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4870 	tp->link_config.rmt_adv = 0;
4871 
4872 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4873 		err = tg3_phy_auxctl_read(tp,
4874 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4875 					  &val);
4876 		if (!err && !(val & (1 << 10))) {
4877 			tg3_phy_auxctl_write(tp,
4878 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4879 					     val | (1 << 10));
4880 			goto relink;
4881 		}
4882 	}
4883 
4884 	bmsr = 0;
4885 	for (i = 0; i < 100; i++) {
4886 		tg3_readphy(tp, MII_BMSR, &bmsr);
4887 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4888 		    (bmsr & BMSR_LSTATUS))
4889 			break;
4890 		udelay(40);
4891 	}
4892 
4893 	if (bmsr & BMSR_LSTATUS) {
4894 		u32 aux_stat, bmcr;
4895 
4896 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4897 		for (i = 0; i < 2000; i++) {
4898 			udelay(10);
4899 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4900 			    aux_stat)
4901 				break;
4902 		}
4903 
4904 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4905 					     &current_speed,
4906 					     &current_duplex);
4907 
4908 		bmcr = 0;
4909 		for (i = 0; i < 200; i++) {
4910 			tg3_readphy(tp, MII_BMCR, &bmcr);
4911 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4912 				continue;
4913 			if (bmcr && bmcr != 0x7fff)
4914 				break;
4915 			udelay(10);
4916 		}
4917 
4918 		lcl_adv = 0;
4919 		rmt_adv = 0;
4920 
4921 		tp->link_config.active_speed = current_speed;
4922 		tp->link_config.active_duplex = current_duplex;
4923 
4924 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4925 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4926 
4927 			if ((bmcr & BMCR_ANENABLE) &&
4928 			    eee_config_ok &&
4929 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4930 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4931 				current_link_up = true;
4932 
4933 			/* EEE setting changes take effect only after a PHY
4934 			 * reset.  If we have skipped a reset due to Link Flap
4935 			 * Avoidance being enabled, do it now.
4936 			 */
4937 			if (!eee_config_ok &&
4938 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4939 			    !force_reset) {
4940 				tg3_setup_eee(tp);
4941 				tg3_phy_reset(tp);
4942 			}
4943 		} else {
4944 			if (!(bmcr & BMCR_ANENABLE) &&
4945 			    tp->link_config.speed == current_speed &&
4946 			    tp->link_config.duplex == current_duplex) {
4947 				current_link_up = true;
4948 			}
4949 		}
4950 
4951 		if (current_link_up &&
4952 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4953 			u32 reg, bit;
4954 
4955 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4956 				reg = MII_TG3_FET_GEN_STAT;
4957 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4958 			} else {
4959 				reg = MII_TG3_EXT_STAT;
4960 				bit = MII_TG3_EXT_STAT_MDIX;
4961 			}
4962 
4963 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4964 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4965 
4966 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4967 		}
4968 	}
4969 
4970 relink:
4971 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4972 		tg3_phy_copper_begin(tp);
4973 
4974 		if (tg3_flag(tp, ROBOSWITCH)) {
4975 			current_link_up = true;
4976 			/* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4977 			current_speed = SPEED_1000;
4978 			current_duplex = DUPLEX_FULL;
4979 			tp->link_config.active_speed = current_speed;
4980 			tp->link_config.active_duplex = current_duplex;
4981 		}
4982 
4983 		tg3_readphy(tp, MII_BMSR, &bmsr);
4984 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4985 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4986 			current_link_up = true;
4987 	}
4988 
4989 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4990 	if (current_link_up) {
4991 		if (tp->link_config.active_speed == SPEED_100 ||
4992 		    tp->link_config.active_speed == SPEED_10)
4993 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4994 		else
4995 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4996 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4997 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4998 	else
4999 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5000 
5001 	/* In order for the 5750 core in the BCM4785 chip to work properly
5002 	 * in RGMII mode, the LED Control Register must be set up.
5003 	 */
5004 	if (tg3_flag(tp, RGMII_MODE)) {
5005 		u32 led_ctrl = tr32(MAC_LED_CTRL);
5006 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5007 
5008 		if (tp->link_config.active_speed == SPEED_10)
5009 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5010 		else if (tp->link_config.active_speed == SPEED_100)
5011 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5012 				     LED_CTRL_100MBPS_ON);
5013 		else if (tp->link_config.active_speed == SPEED_1000)
5014 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5015 				     LED_CTRL_1000MBPS_ON);
5016 
5017 		tw32(MAC_LED_CTRL, led_ctrl);
5018 		udelay(40);
5019 	}
5020 
5021 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5022 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5023 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5024 
5025 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5026 		if (current_link_up &&
5027 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5028 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5029 		else
5030 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5031 	}
5032 
5033 	/* Without this setting the Netgear GA302T PHY does not
5034 	 * send/receive packets; the reason is not understood.
5035 	 */
5036 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5037 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5038 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5039 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5040 		udelay(80);
5041 	}
5042 
5043 	tw32_f(MAC_MODE, tp->mac_mode);
5044 	udelay(40);
5045 
5046 	tg3_phy_eee_adjust(tp, current_link_up);
5047 
5048 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5049 		/* Polled via timer. */
5050 		tw32_f(MAC_EVENT, 0);
5051 	} else {
5052 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5053 	}
5054 	udelay(40);
5055 
5056 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5057 	    current_link_up &&
5058 	    tp->link_config.active_speed == SPEED_1000 &&
5059 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5060 		udelay(120);
5061 		tw32_f(MAC_STATUS,
5062 		     (MAC_STATUS_SYNC_CHANGED |
5063 		      MAC_STATUS_CFG_CHANGED));
5064 		udelay(40);
5065 		tg3_write_mem(tp,
5066 			      NIC_SRAM_FIRMWARE_MBOX,
5067 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5068 	}
5069 
5070 	/* Prevent send BD corruption. */
5071 	if (tg3_flag(tp, CLKREQ_BUG)) {
5072 		if (tp->link_config.active_speed == SPEED_100 ||
5073 		    tp->link_config.active_speed == SPEED_10)
5074 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5075 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5076 		else
5077 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5078 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5079 	}
5080 
5081 	tg3_test_and_report_link_chg(tp, current_link_up);
5082 
5083 	return 0;
5084 }
5085 
5086 struct tg3_fiber_aneginfo {
5087 	int state;
5088 #define ANEG_STATE_UNKNOWN		0
5089 #define ANEG_STATE_AN_ENABLE		1
5090 #define ANEG_STATE_RESTART_INIT		2
5091 #define ANEG_STATE_RESTART		3
5092 #define ANEG_STATE_DISABLE_LINK_OK	4
5093 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5094 #define ANEG_STATE_ABILITY_DETECT	6
5095 #define ANEG_STATE_ACK_DETECT_INIT	7
5096 #define ANEG_STATE_ACK_DETECT		8
5097 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5098 #define ANEG_STATE_COMPLETE_ACK		10
5099 #define ANEG_STATE_IDLE_DETECT_INIT	11
5100 #define ANEG_STATE_IDLE_DETECT		12
5101 #define ANEG_STATE_LINK_OK		13
5102 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5103 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5104 
5105 	u32 flags;
5106 #define MR_AN_ENABLE		0x00000001
5107 #define MR_RESTART_AN		0x00000002
5108 #define MR_AN_COMPLETE		0x00000004
5109 #define MR_PAGE_RX		0x00000008
5110 #define MR_NP_LOADED		0x00000010
5111 #define MR_TOGGLE_TX		0x00000020
5112 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5113 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5114 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5115 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5116 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5117 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5118 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5119 #define MR_TOGGLE_RX		0x00002000
5120 #define MR_NP_RX		0x00004000
5121 
5122 #define MR_LINK_OK		0x80000000
5123 
5124 	unsigned long link_time, cur_time;
5125 
5126 	u32 ability_match_cfg;
5127 	int ability_match_count;
5128 
5129 	char ability_match, idle_match, ack_match;
5130 
5131 	u32 txconfig, rxconfig;
5132 #define ANEG_CFG_NP		0x00000080
5133 #define ANEG_CFG_ACK		0x00000040
5134 #define ANEG_CFG_RF2		0x00000020
5135 #define ANEG_CFG_RF1		0x00000010
5136 #define ANEG_CFG_PS2		0x00000001
5137 #define ANEG_CFG_PS1		0x00008000
5138 #define ANEG_CFG_HD		0x00004000
5139 #define ANEG_CFG_FD		0x00002000
5140 #define ANEG_CFG_INVAL		0x00001f06
5141 
5142 };
5143 #define ANEG_OK		0
5144 #define ANEG_DONE	1
5145 #define ANEG_TIMER_ENAB	2
5146 #define ANEG_FAILED	-1
5147 
5148 #define ANEG_STATE_SETTLE_TIME	10000
5149 
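/* The state machine below is a software implementation of 1000BASE-X
 * autonegotiation (IEEE 802.3 Clause 37). ap->cur_time advances by one
 * on every invocation; fiber_autoneg() below calls it roughly once per
 * microsecond, so ANEG_STATE_SETTLE_TIME corresponds to about 10 ms.
 */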
5150 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5151 				   struct tg3_fiber_aneginfo *ap)
5152 {
5153 	u16 flowctrl;
5154 	unsigned long delta;
5155 	u32 rx_cfg_reg;
5156 	int ret;
5157 
5158 	if (ap->state == ANEG_STATE_UNKNOWN) {
5159 		ap->rxconfig = 0;
5160 		ap->link_time = 0;
5161 		ap->cur_time = 0;
5162 		ap->ability_match_cfg = 0;
5163 		ap->ability_match_count = 0;
5164 		ap->ability_match = 0;
5165 		ap->idle_match = 0;
5166 		ap->ack_match = 0;
5167 	}
5168 	ap->cur_time++;
5169 
5170 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5171 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5172 
5173 		if (rx_cfg_reg != ap->ability_match_cfg) {
5174 			ap->ability_match_cfg = rx_cfg_reg;
5175 			ap->ability_match = 0;
5176 			ap->ability_match_count = 0;
5177 		} else {
5178 			if (++ap->ability_match_count > 1) {
5179 				ap->ability_match = 1;
5180 				ap->ability_match_cfg = rx_cfg_reg;
5181 			}
5182 		}
5183 		if (rx_cfg_reg & ANEG_CFG_ACK)
5184 			ap->ack_match = 1;
5185 		else
5186 			ap->ack_match = 0;
5187 
5188 		ap->idle_match = 0;
5189 	} else {
5190 		ap->idle_match = 1;
5191 		ap->ability_match_cfg = 0;
5192 		ap->ability_match_count = 0;
5193 		ap->ability_match = 0;
5194 		ap->ack_match = 0;
5195 
5196 		rx_cfg_reg = 0;
5197 	}
5198 
5199 	ap->rxconfig = rx_cfg_reg;
5200 	ret = ANEG_OK;
5201 
5202 	switch (ap->state) {
5203 	case ANEG_STATE_UNKNOWN:
5204 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5205 			ap->state = ANEG_STATE_AN_ENABLE;
5206 
5207 		fallthrough;
5208 	case ANEG_STATE_AN_ENABLE:
5209 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5210 		if (ap->flags & MR_AN_ENABLE) {
5211 			ap->link_time = 0;
5212 			ap->cur_time = 0;
5213 			ap->ability_match_cfg = 0;
5214 			ap->ability_match_count = 0;
5215 			ap->ability_match = 0;
5216 			ap->idle_match = 0;
5217 			ap->ack_match = 0;
5218 
5219 			ap->state = ANEG_STATE_RESTART_INIT;
5220 		} else {
5221 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5222 		}
5223 		break;
5224 
5225 	case ANEG_STATE_RESTART_INIT:
5226 		ap->link_time = ap->cur_time;
5227 		ap->flags &= ~(MR_NP_LOADED);
5228 		ap->txconfig = 0;
5229 		tw32(MAC_TX_AUTO_NEG, 0);
5230 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5231 		tw32_f(MAC_MODE, tp->mac_mode);
5232 		udelay(40);
5233 
5234 		ret = ANEG_TIMER_ENAB;
5235 		ap->state = ANEG_STATE_RESTART;
5236 
5237 		fallthrough;
5238 	case ANEG_STATE_RESTART:
5239 		delta = ap->cur_time - ap->link_time;
5240 		if (delta > ANEG_STATE_SETTLE_TIME)
5241 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5242 		else
5243 			ret = ANEG_TIMER_ENAB;
5244 		break;
5245 
5246 	case ANEG_STATE_DISABLE_LINK_OK:
5247 		ret = ANEG_DONE;
5248 		break;
5249 
5250 	case ANEG_STATE_ABILITY_DETECT_INIT:
5251 		ap->flags &= ~(MR_TOGGLE_TX);
5252 		ap->txconfig = ANEG_CFG_FD;
5253 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5254 		if (flowctrl & ADVERTISE_1000XPAUSE)
5255 			ap->txconfig |= ANEG_CFG_PS1;
5256 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5257 			ap->txconfig |= ANEG_CFG_PS2;
5258 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5259 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5260 		tw32_f(MAC_MODE, tp->mac_mode);
5261 		udelay(40);
5262 
5263 		ap->state = ANEG_STATE_ABILITY_DETECT;
5264 		break;
5265 
5266 	case ANEG_STATE_ABILITY_DETECT:
5267 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5268 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5269 		break;
5270 
5271 	case ANEG_STATE_ACK_DETECT_INIT:
5272 		ap->txconfig |= ANEG_CFG_ACK;
5273 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5274 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5275 		tw32_f(MAC_MODE, tp->mac_mode);
5276 		udelay(40);
5277 
5278 		ap->state = ANEG_STATE_ACK_DETECT;
5279 
5280 		fallthrough;
5281 	case ANEG_STATE_ACK_DETECT:
5282 		if (ap->ack_match != 0) {
5283 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5284 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5285 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5286 			} else {
5287 				ap->state = ANEG_STATE_AN_ENABLE;
5288 			}
5289 		} else if (ap->ability_match != 0 &&
5290 			   ap->rxconfig == 0) {
5291 			ap->state = ANEG_STATE_AN_ENABLE;
5292 		}
5293 		break;
5294 
5295 	case ANEG_STATE_COMPLETE_ACK_INIT:
5296 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5297 			ret = ANEG_FAILED;
5298 			break;
5299 		}
5300 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5301 			       MR_LP_ADV_HALF_DUPLEX |
5302 			       MR_LP_ADV_SYM_PAUSE |
5303 			       MR_LP_ADV_ASYM_PAUSE |
5304 			       MR_LP_ADV_REMOTE_FAULT1 |
5305 			       MR_LP_ADV_REMOTE_FAULT2 |
5306 			       MR_LP_ADV_NEXT_PAGE |
5307 			       MR_TOGGLE_RX |
5308 			       MR_NP_RX);
5309 		if (ap->rxconfig & ANEG_CFG_FD)
5310 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5311 		if (ap->rxconfig & ANEG_CFG_HD)
5312 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5313 		if (ap->rxconfig & ANEG_CFG_PS1)
5314 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5315 		if (ap->rxconfig & ANEG_CFG_PS2)
5316 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5317 		if (ap->rxconfig & ANEG_CFG_RF1)
5318 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5319 		if (ap->rxconfig & ANEG_CFG_RF2)
5320 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5321 		if (ap->rxconfig & ANEG_CFG_NP)
5322 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5323 
5324 		ap->link_time = ap->cur_time;
5325 
5326 		ap->flags ^= (MR_TOGGLE_TX);
5327 		if (ap->rxconfig & 0x0008)
5328 			ap->flags |= MR_TOGGLE_RX;
5329 		if (ap->rxconfig & ANEG_CFG_NP)
5330 			ap->flags |= MR_NP_RX;
5331 		ap->flags |= MR_PAGE_RX;
5332 
5333 		ap->state = ANEG_STATE_COMPLETE_ACK;
5334 		ret = ANEG_TIMER_ENAB;
5335 		break;
5336 
5337 	case ANEG_STATE_COMPLETE_ACK:
5338 		if (ap->ability_match != 0 &&
5339 		    ap->rxconfig == 0) {
5340 			ap->state = ANEG_STATE_AN_ENABLE;
5341 			break;
5342 		}
5343 		delta = ap->cur_time - ap->link_time;
5344 		if (delta > ANEG_STATE_SETTLE_TIME) {
5345 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5346 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5347 			} else {
5348 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5349 				    !(ap->flags & MR_NP_RX)) {
5350 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5351 				} else {
5352 					ret = ANEG_FAILED;
5353 				}
5354 			}
5355 		}
5356 		break;
5357 
5358 	case ANEG_STATE_IDLE_DETECT_INIT:
5359 		ap->link_time = ap->cur_time;
5360 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5361 		tw32_f(MAC_MODE, tp->mac_mode);
5362 		udelay(40);
5363 
5364 		ap->state = ANEG_STATE_IDLE_DETECT;
5365 		ret = ANEG_TIMER_ENAB;
5366 		break;
5367 
5368 	case ANEG_STATE_IDLE_DETECT:
5369 		if (ap->ability_match != 0 &&
5370 		    ap->rxconfig == 0) {
5371 			ap->state = ANEG_STATE_AN_ENABLE;
5372 			break;
5373 		}
5374 		delta = ap->cur_time - ap->link_time;
5375 		if (delta > ANEG_STATE_SETTLE_TIME) {
5376 			/* XXX another gem from the Broadcom driver :( */
5377 			ap->state = ANEG_STATE_LINK_OK;
5378 		}
5379 		break;
5380 
5381 	case ANEG_STATE_LINK_OK:
5382 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5383 		ret = ANEG_DONE;
5384 		break;
5385 
5386 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5387 		/* ??? unimplemented */
5388 		break;
5389 
5390 	case ANEG_STATE_NEXT_PAGE_WAIT:
5391 		/* ??? unimplemented */
5392 		break;
5393 
5394 	default:
5395 		ret = ANEG_FAILED;
5396 		break;
5397 	}
5398 
5399 	return ret;
5400 }
5401 
5402 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5403 {
5404 	int res = 0;
5405 	struct tg3_fiber_aneginfo aninfo;
5406 	int status = ANEG_FAILED;
5407 	unsigned int tick;
5408 	u32 tmp;
5409 
5410 	tw32_f(MAC_TX_AUTO_NEG, 0);
5411 
5412 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5413 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5414 	udelay(40);
5415 
5416 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5417 	udelay(40);
5418 
5419 	memset(&aninfo, 0, sizeof(aninfo));
5420 	aninfo.flags |= MR_AN_ENABLE;
5421 	aninfo.state = ANEG_STATE_UNKNOWN;
5422 	aninfo.cur_time = 0;
5423 	tick = 0;
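	/* Crank the software autoneg state machine with a roughly 195 ms
	 * budget: up to 195000 iterations at 1 us per tick.
	 */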
5424 	while (++tick < 195000) {
5425 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5426 		if (status == ANEG_DONE || status == ANEG_FAILED)
5427 			break;
5428 
5429 		udelay(1);
5430 	}
5431 
5432 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5433 	tw32_f(MAC_MODE, tp->mac_mode);
5434 	udelay(40);
5435 
5436 	*txflags = aninfo.txconfig;
5437 	*rxflags = aninfo.flags;
5438 
5439 	if (status == ANEG_DONE &&
5440 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5441 			     MR_LP_ADV_FULL_DUPLEX)))
5442 		res = 1;
5443 
5444 	return res;
5445 }
5446 
5447 static void tg3_init_bcm8002(struct tg3 *tp)
5448 {
5449 	u32 mac_status = tr32(MAC_STATUS);
5450 	int i;
5451 
5452 	/* Reset when initializing the first time or when we have a link. */
5453 	if (tg3_flag(tp, INIT_COMPLETE) &&
5454 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5455 		return;
5456 
5457 	/* Set PLL lock range. */
5458 	tg3_writephy(tp, 0x16, 0x8007);
5459 
5460 	/* SW reset */
5461 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5462 
5463 	/* Wait for reset to complete. */
5464 	/* XXX schedule_timeout() ... */
5465 	for (i = 0; i < 500; i++)
5466 		udelay(10);
5467 
5468 	/* Config mode; select PMA/Ch 1 regs. */
5469 	tg3_writephy(tp, 0x10, 0x8411);
5470 
5471 	/* Enable auto-lock and comdet, select txclk for tx. */
5472 	tg3_writephy(tp, 0x11, 0x0a10);
5473 
5474 	tg3_writephy(tp, 0x18, 0x00a0);
5475 	tg3_writephy(tp, 0x16, 0x41ff);
5476 
5477 	/* Assert and deassert POR. */
5478 	tg3_writephy(tp, 0x13, 0x0400);
5479 	udelay(40);
5480 	tg3_writephy(tp, 0x13, 0x0000);
5481 
5482 	tg3_writephy(tp, 0x11, 0x0a50);
5483 	udelay(40);
5484 	tg3_writephy(tp, 0x11, 0x0a10);
5485 
5486 	/* Wait for signal to stabilize */
5487 	/* XXX schedule_timeout() ... */
5488 	for (i = 0; i < 15000; i++)
5489 		udelay(10);
5490 
5491 	/* Deselect the channel register so we can read the PHYID
5492 	 * later.
5493 	 */
5494 	tg3_writephy(tp, 0x10, 0x8011);
5495 }
5496 
5497 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5498 {
5499 	u16 flowctrl;
5500 	bool current_link_up;
5501 	u32 sg_dig_ctrl, sg_dig_status;
5502 	u32 serdes_cfg, expected_sg_dig_ctrl;
5503 	int workaround, port_a;
5504 
5505 	serdes_cfg = 0;
5506 	workaround = 0;
5507 	port_a = 1;
5508 	current_link_up = false;
5509 
5510 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5511 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5512 		workaround = 1;
5513 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5514 			port_a = 0;
5515 
5516 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5517 		/* preserve bits 20-23 for voltage regulator */
5518 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5519 	}
5520 
5521 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5522 
5523 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5524 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5525 			if (workaround) {
5526 				u32 val = serdes_cfg;
5527 
5528 				if (port_a)
5529 					val |= 0xc010000;
5530 				else
5531 					val |= 0x4010000;
5532 				tw32_f(MAC_SERDES_CFG, val);
5533 			}
5534 
5535 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5536 		}
5537 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5538 			tg3_setup_flow_control(tp, 0, 0);
5539 			current_link_up = true;
5540 		}
5541 		goto out;
5542 	}
5543 
5544 	/* Want auto-negotiation.  */
5545 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5546 
5547 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5548 	if (flowctrl & ADVERTISE_1000XPAUSE)
5549 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5550 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5551 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5552 
5553 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5554 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5555 		    tp->serdes_counter &&
5556 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5557 				    MAC_STATUS_RCVD_CFG)) ==
5558 		     MAC_STATUS_PCS_SYNCED)) {
5559 			tp->serdes_counter--;
5560 			current_link_up = true;
5561 			goto out;
5562 		}
5563 restart_autoneg:
5564 		if (workaround)
5565 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5566 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5567 		udelay(5);
5568 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5569 
5570 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5571 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5572 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5573 				 MAC_STATUS_SIGNAL_DET)) {
5574 		sg_dig_status = tr32(SG_DIG_STATUS);
5575 		mac_status = tr32(MAC_STATUS);
5576 
5577 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5578 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5579 			u32 local_adv = 0, remote_adv = 0;
5580 
5581 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5582 				local_adv |= ADVERTISE_1000XPAUSE;
5583 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5584 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5585 
5586 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5587 				remote_adv |= LPA_1000XPAUSE;
5588 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5589 				remote_adv |= LPA_1000XPAUSE_ASYM;
5590 
5591 			tp->link_config.rmt_adv =
5592 					   mii_adv_to_ethtool_adv_x(remote_adv);
5593 
5594 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5595 			current_link_up = true;
5596 			tp->serdes_counter = 0;
5597 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5598 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5599 			if (tp->serdes_counter)
5600 				tp->serdes_counter--;
5601 			else {
5602 				if (workaround) {
5603 					u32 val = serdes_cfg;
5604 
5605 					if (port_a)
5606 						val |= 0xc010000;
5607 					else
5608 						val |= 0x4010000;
5609 
5610 					tw32_f(MAC_SERDES_CFG, val);
5611 				}
5612 
5613 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5614 				udelay(40);
5615 
5616 				/* Link parallel detection: link is up only
5617 				 * if we have PCS_SYNC and are not receiving
5618 				 * config code words. */
5619 				mac_status = tr32(MAC_STATUS);
5620 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5621 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5622 					tg3_setup_flow_control(tp, 0, 0);
5623 					current_link_up = true;
5624 					tp->phy_flags |=
5625 						TG3_PHYFLG_PARALLEL_DETECT;
5626 					tp->serdes_counter =
5627 						SERDES_PARALLEL_DET_TIMEOUT;
5628 				} else
5629 					goto restart_autoneg;
5630 			}
5631 		}
5632 	} else {
5633 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5634 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5635 	}
5636 
5637 out:
5638 	return current_link_up;
5639 }
5640 
5641 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5642 {
5643 	bool current_link_up = false;
5644 
5645 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5646 		goto out;
5647 
5648 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5649 		u32 txflags, rxflags;
5650 		int i;
5651 
5652 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5653 			u32 local_adv = 0, remote_adv = 0;
5654 
5655 			if (txflags & ANEG_CFG_PS1)
5656 				local_adv |= ADVERTISE_1000XPAUSE;
5657 			if (txflags & ANEG_CFG_PS2)
5658 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5659 
5660 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5661 				remote_adv |= LPA_1000XPAUSE;
5662 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5663 				remote_adv |= LPA_1000XPAUSE_ASYM;
5664 
5665 			tp->link_config.rmt_adv =
5666 					   mii_adv_to_ethtool_adv_x(remote_adv);
5667 
5668 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5669 
5670 			current_link_up = true;
5671 		}
5672 		for (i = 0; i < 30; i++) {
5673 			udelay(20);
5674 			tw32_f(MAC_STATUS,
5675 			       (MAC_STATUS_SYNC_CHANGED |
5676 				MAC_STATUS_CFG_CHANGED));
5677 			udelay(40);
5678 			if ((tr32(MAC_STATUS) &
5679 			     (MAC_STATUS_SYNC_CHANGED |
5680 			      MAC_STATUS_CFG_CHANGED)) == 0)
5681 				break;
5682 		}
5683 
5684 		mac_status = tr32(MAC_STATUS);
5685 		if (!current_link_up &&
5686 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5687 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5688 			current_link_up = true;
5689 	} else {
5690 		tg3_setup_flow_control(tp, 0, 0);
5691 
5692 		/* Forcing 1000FD link up. */
5693 		current_link_up = true;
5694 
5695 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5696 		udelay(40);
5697 
5698 		tw32_f(MAC_MODE, tp->mac_mode);
5699 		udelay(40);
5700 	}
5701 
5702 out:
5703 	return current_link_up;
5704 }
5705 
5706 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5707 {
5708 	u32 orig_pause_cfg;
5709 	u32 orig_active_speed;
5710 	u8 orig_active_duplex;
5711 	u32 mac_status;
5712 	bool current_link_up;
5713 	int i;
5714 
5715 	orig_pause_cfg = tp->link_config.active_flowctrl;
5716 	orig_active_speed = tp->link_config.active_speed;
5717 	orig_active_duplex = tp->link_config.active_duplex;
5718 
5719 	if (!tg3_flag(tp, HW_AUTONEG) &&
5720 	    tp->link_up &&
5721 	    tg3_flag(tp, INIT_COMPLETE)) {
5722 		mac_status = tr32(MAC_STATUS);
5723 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5724 			       MAC_STATUS_SIGNAL_DET |
5725 			       MAC_STATUS_CFG_CHANGED |
5726 			       MAC_STATUS_RCVD_CFG);
5727 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5728 				   MAC_STATUS_SIGNAL_DET)) {
5729 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5730 					    MAC_STATUS_CFG_CHANGED));
5731 			return 0;
5732 		}
5733 	}
5734 
5735 	tw32_f(MAC_TX_AUTO_NEG, 0);
5736 
5737 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5738 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5739 	tw32_f(MAC_MODE, tp->mac_mode);
5740 	udelay(40);
5741 
5742 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5743 		tg3_init_bcm8002(tp);
5744 
5745 	/* Enable link change event even when serdes polling.  */
5746 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5747 	udelay(40);
5748 
5749 	tp->link_config.rmt_adv = 0;
5750 	mac_status = tr32(MAC_STATUS);
5751 
5752 	if (tg3_flag(tp, HW_AUTONEG))
5753 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5754 	else
5755 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5756 
5757 	tp->napi[0].hw_status->status =
5758 		(SD_STATUS_UPDATED |
5759 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5760 
5761 	for (i = 0; i < 100; i++) {
5762 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5763 				    MAC_STATUS_CFG_CHANGED));
5764 		udelay(5);
5765 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5766 					 MAC_STATUS_CFG_CHANGED |
5767 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5768 			break;
5769 	}
5770 
5771 	mac_status = tr32(MAC_STATUS);
5772 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5773 		current_link_up = false;
5774 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5775 		    tp->serdes_counter == 0) {
5776 			tw32_f(MAC_MODE, (tp->mac_mode |
5777 					  MAC_MODE_SEND_CONFIGS));
5778 			udelay(1);
5779 			tw32_f(MAC_MODE, tp->mac_mode);
5780 		}
5781 	}
5782 
5783 	if (current_link_up) {
5784 		tp->link_config.active_speed = SPEED_1000;
5785 		tp->link_config.active_duplex = DUPLEX_FULL;
5786 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5787 				    LED_CTRL_LNKLED_OVERRIDE |
5788 				    LED_CTRL_1000MBPS_ON));
5789 	} else {
5790 		tp->link_config.active_speed = SPEED_UNKNOWN;
5791 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5792 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5793 				    LED_CTRL_LNKLED_OVERRIDE |
5794 				    LED_CTRL_TRAFFIC_OVERRIDE));
5795 	}
5796 
5797 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5798 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5799 		if (orig_pause_cfg != now_pause_cfg ||
5800 		    orig_active_speed != tp->link_config.active_speed ||
5801 		    orig_active_duplex != tp->link_config.active_duplex)
5802 			tg3_link_report(tp);
5803 	}
5804 
5805 	return 0;
5806 }
5807 
5808 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5809 {
5810 	int err = 0;
5811 	u32 bmsr, bmcr;
5812 	u32 current_speed = SPEED_UNKNOWN;
5813 	u8 current_duplex = DUPLEX_UNKNOWN;
5814 	bool current_link_up = false;
5815 	u32 local_adv = 0, remote_adv = 0, sgsr;
5816 
5817 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5818 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5819 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5820 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5821 
5822 		if (force_reset)
5823 			tg3_phy_reset(tp);
5824 
5825 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5826 
5827 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5828 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5829 		} else {
5830 			current_link_up = true;
5831 			if (sgsr & SERDES_TG3_SPEED_1000) {
5832 				current_speed = SPEED_1000;
5833 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5834 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5835 				current_speed = SPEED_100;
5836 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5837 			} else {
5838 				current_speed = SPEED_10;
5839 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5840 			}
5841 
5842 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5843 				current_duplex = DUPLEX_FULL;
5844 			else
5845 				current_duplex = DUPLEX_HALF;
5846 		}
5847 
5848 		tw32_f(MAC_MODE, tp->mac_mode);
5849 		udelay(40);
5850 
5851 		tg3_clear_mac_status(tp);
5852 
5853 		goto fiber_setup_done;
5854 	}
5855 
5856 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5857 	tw32_f(MAC_MODE, tp->mac_mode);
5858 	udelay(40);
5859 
5860 	tg3_clear_mac_status(tp);
5861 
5862 	if (force_reset)
5863 		tg3_phy_reset(tp);
5864 
5865 	tp->link_config.rmt_adv = 0;
5866 
5867 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5868 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5870 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5871 			bmsr |= BMSR_LSTATUS;
5872 		else
5873 			bmsr &= ~BMSR_LSTATUS;
5874 	}
5875 
5876 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5877 
5878 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5879 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5880 		/* do nothing, just check for link up at the end */
5881 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5882 		u32 adv, newadv;
5883 
5884 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5885 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5886 				 ADVERTISE_1000XPAUSE |
5887 				 ADVERTISE_1000XPSE_ASYM |
5888 				 ADVERTISE_SLCT);
5889 
5890 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5891 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5892 
5893 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5894 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5895 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5896 			tg3_writephy(tp, MII_BMCR, bmcr);
5897 
5898 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5899 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5900 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5901 
5902 			return err;
5903 		}
5904 	} else {
5905 		u32 new_bmcr;
5906 
5907 		bmcr &= ~BMCR_SPEED1000;
5908 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5909 
5910 		if (tp->link_config.duplex == DUPLEX_FULL)
5911 			new_bmcr |= BMCR_FULLDPLX;
5912 
5913 		if (new_bmcr != bmcr) {
5914 			/* BMCR_SPEED1000 is a reserved bit that needs
5915 			 * to be set on write.
5916 			 */
5917 			new_bmcr |= BMCR_SPEED1000;
5918 
5919 			/* Force a linkdown */
5920 			if (tp->link_up) {
5921 				u32 adv;
5922 
5923 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5924 				adv &= ~(ADVERTISE_1000XFULL |
5925 					 ADVERTISE_1000XHALF |
5926 					 ADVERTISE_SLCT);
5927 				tg3_writephy(tp, MII_ADVERTISE, adv);
5928 				tg3_writephy(tp, MII_BMCR, bmcr |
5929 							   BMCR_ANRESTART |
5930 							   BMCR_ANENABLE);
5931 				udelay(10);
5932 				tg3_carrier_off(tp);
5933 			}
5934 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5935 			bmcr = new_bmcr;
5936 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5937 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5939 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5940 					bmsr |= BMSR_LSTATUS;
5941 				else
5942 					bmsr &= ~BMSR_LSTATUS;
5943 			}
5944 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5945 		}
5946 	}
5947 
5948 	if (bmsr & BMSR_LSTATUS) {
5949 		current_speed = SPEED_1000;
5950 		current_link_up = true;
5951 		if (bmcr & BMCR_FULLDPLX)
5952 			current_duplex = DUPLEX_FULL;
5953 		else
5954 			current_duplex = DUPLEX_HALF;
5955 
5956 		local_adv = 0;
5957 		remote_adv = 0;
5958 
5959 		if (bmcr & BMCR_ANENABLE) {
5960 			u32 common;
5961 
5962 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5963 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5964 			common = local_adv & remote_adv;
5965 			if (common & (ADVERTISE_1000XHALF |
5966 				      ADVERTISE_1000XFULL)) {
5967 				if (common & ADVERTISE_1000XFULL)
5968 					current_duplex = DUPLEX_FULL;
5969 				else
5970 					current_duplex = DUPLEX_HALF;
5971 
5972 				tp->link_config.rmt_adv =
5973 					   mii_adv_to_ethtool_adv_x(remote_adv);
5974 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5975 				/* Link is up via parallel detect */
5976 			} else {
5977 				current_link_up = false;
5978 			}
5979 		}
5980 	}
5981 
5982 fiber_setup_done:
5983 	if (current_link_up && current_duplex == DUPLEX_FULL)
5984 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5985 
5986 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5987 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5988 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5989 
5990 	tw32_f(MAC_MODE, tp->mac_mode);
5991 	udelay(40);
5992 
5993 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5994 
5995 	tp->link_config.active_speed = current_speed;
5996 	tp->link_config.active_duplex = current_duplex;
5997 
5998 	tg3_test_and_report_link_chg(tp, current_link_up);
5999 	return err;
6000 }
6001 
6002 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6003 {
6004 	if (tp->serdes_counter) {
6005 		/* Give autoneg time to complete. */
6006 		tp->serdes_counter--;
6007 		return;
6008 	}
6009 
6010 	if (!tp->link_up &&
6011 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6012 		u32 bmcr;
6013 
6014 		tg3_readphy(tp, MII_BMCR, &bmcr);
6015 		if (bmcr & BMCR_ANENABLE) {
6016 			u32 phy1, phy2;
6017 
6018 			/* Select shadow register 0x1f */
6019 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6020 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6021 
6022 			/* Select expansion interrupt status register */
6023 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6024 					 MII_TG3_DSP_EXP1_INT_STAT);
6025 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6026 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6027 
6028 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6029 				/* We have signal detect and not receiving
6030 				 * config code words, link is up by parallel
6031 				 * detection.
6032 				 */
6033 
6034 				bmcr &= ~BMCR_ANENABLE;
6035 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6036 				tg3_writephy(tp, MII_BMCR, bmcr);
6037 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6038 			}
6039 		}
6040 	} else if (tp->link_up &&
6041 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6042 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6043 		u32 phy2;
6044 
6045 		/* Select expansion interrupt status register */
6046 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6047 				 MII_TG3_DSP_EXP1_INT_STAT);
6048 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6049 		if (phy2 & 0x20) {
6050 			u32 bmcr;
6051 
6052 			/* Config code words received, turn on autoneg. */
6053 			tg3_readphy(tp, MII_BMCR, &bmcr);
6054 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6055 
6056 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6057 
6058 		}
6059 	}
6060 }
6061 
6062 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6063 {
6064 	u32 val;
6065 	int err;
6066 
6067 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6068 		err = tg3_setup_fiber_phy(tp, force_reset);
6069 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6070 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6071 	else
6072 		err = tg3_setup_copper_phy(tp, force_reset);
6073 
6074 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6075 		u32 scale;
6076 
6077 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6078 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6079 			scale = 65;
6080 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6081 			scale = 6;
6082 		else
6083 			scale = 12;
6084 
6085 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6086 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6087 		tw32(GRC_MISC_CFG, val);
6088 	}
6089 
6090 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6091 	      (6 << TX_LENGTHS_IPG_SHIFT);
6092 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6093 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6094 		val |= tr32(MAC_TX_LENGTHS) &
6095 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6096 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6097 
6098 	if (tp->link_config.active_speed == SPEED_1000 &&
6099 	    tp->link_config.active_duplex == DUPLEX_HALF)
6100 		tw32(MAC_TX_LENGTHS, val |
6101 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6102 	else
6103 		tw32(MAC_TX_LENGTHS, val |
6104 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6105 
6106 	if (!tg3_flag(tp, 5705_PLUS)) {
6107 		if (tp->link_up) {
6108 			tw32(HOSTCC_STAT_COAL_TICKS,
6109 			     tp->coal.stats_block_coalesce_usecs);
6110 		} else {
6111 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6112 		}
6113 	}
6114 
6115 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6116 		val = tr32(PCIE_PWR_MGMT_THRESH);
6117 		if (!tp->link_up)
6118 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6119 			      tp->pwrmgmt_thresh;
6120 		else
6121 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6122 		tw32(PCIE_PWR_MGMT_THRESH, val);
6123 	}
6124 
6125 	return err;
6126 }
6127 
6128 /* tp->lock must be held */
6129 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6130 {
6131 	u64 stamp;
6132 
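	/* Bracket the device read with system timestamps so gettimex64()
	 * callers can correlate the PHC value with system time.
	 */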
6133 	ptp_read_system_prets(sts);
6134 	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6135 	ptp_read_system_postts(sts);
6136 	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6137 
6138 	return stamp;
6139 }
6140 
6141 /* tp->lock must be held */
6142 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6143 {
6144 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6145 
6146 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6147 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6148 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6149 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6150 }
6151 
6152 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6153 static inline void tg3_full_unlock(struct tg3 *tp);
6154 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6155 {
6156 	struct tg3 *tp = netdev_priv(dev);
6157 
6158 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6159 				SOF_TIMESTAMPING_RX_SOFTWARE |
6160 				SOF_TIMESTAMPING_SOFTWARE;
6161 
6162 	if (tg3_flag(tp, PTP_CAPABLE)) {
6163 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6164 					SOF_TIMESTAMPING_RX_HARDWARE |
6165 					SOF_TIMESTAMPING_RAW_HARDWARE;
6166 	}
6167 
6168 	if (tp->ptp_clock)
6169 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6170 	else
6171 		info->phc_index = -1;
6172 
6173 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6174 
6175 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6176 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6177 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6178 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6179 	return 0;
6180 }
6181 
6182 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6183 {
6184 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6185 	bool neg_adj = false;
6186 	u32 correction = 0;
6187 
6188 	if (ppb < 0) {
6189 		neg_adj = true;
6190 		ppb = -ppb;
6191 	}
6192 
6193 	/* Frequency adjustment is performed in hardware with a 24-bit
6194 	 * accumulator and a programmable correction value. On each clock
6195 	 * cycle the correction value is added to the accumulator, and when
6196 	 * it overflows, the time counter is incremented/decremented.
6197 	 *
6198 	 * So conversion from ppb to correction value is
6199 	 *		ppb * (1 << 24) / 1000000000
6200 	 */
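	/* For example, ppb = 1000000 (1000 ppm) gives
	 * correction = 1000000 * (1 << 24) / 1000000000 = 16777.
	 */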
6201 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6202 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6203 
6204 	tg3_full_lock(tp, 0);
6205 
6206 	if (correction)
6207 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6208 		     TG3_EAV_REF_CLK_CORRECT_EN |
6209 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6210 	else
6211 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6212 
6213 	tg3_full_unlock(tp);
6214 
6215 	return 0;
6216 }
6217 
6218 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6219 {
6220 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6221 
6222 	tg3_full_lock(tp, 0);
6223 	tp->ptp_adjust += delta;
6224 	tg3_full_unlock(tp);
6225 
6226 	return 0;
6227 }
6228 
6229 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6230 			    struct ptp_system_timestamp *sts)
6231 {
6232 	u64 ns;
6233 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6234 
6235 	tg3_full_lock(tp, 0);
6236 	ns = tg3_refclk_read(tp, sts);
6237 	ns += tp->ptp_adjust;
6238 	tg3_full_unlock(tp);
6239 
6240 	*ts = ns_to_timespec64(ns);
6241 
6242 	return 0;
6243 }
6244 
6245 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6246 			   const struct timespec64 *ts)
6247 {
6248 	u64 ns;
6249 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6250 
6251 	ns = timespec64_to_ns(ts);
6252 
6253 	tg3_full_lock(tp, 0);
6254 	tg3_refclk_write(tp, ns);
6255 	tp->ptp_adjust = 0;
6256 	tg3_full_unlock(tp);
6257 
6258 	return 0;
6259 }
6260 
6261 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6262 			  struct ptp_clock_request *rq, int on)
6263 {
6264 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6265 	u32 clock_ctl;
6266 	int rval = 0;
6267 
6268 	switch (rq->type) {
6269 	case PTP_CLK_REQ_PEROUT:
6270 		/* Reject requests with unsupported flags */
6271 		if (rq->perout.flags)
6272 			return -EOPNOTSUPP;
6273 
6274 		if (rq->perout.index != 0)
6275 			return -EINVAL;
6276 
6277 		tg3_full_lock(tp, 0);
6278 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6279 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6280 
6281 		if (on) {
6282 			u64 nsec;
6283 
6284 			nsec = rq->perout.start.sec * 1000000000ULL +
6285 			       rq->perout.start.nsec;
6286 
6287 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6288 				netdev_warn(tp->dev,
6289 					    "Device supports only a one-shot timesync output, period must be 0\n");
6290 				rval = -EINVAL;
6291 				goto err_out;
6292 			}
6293 
6294 			if (nsec & (1ULL << 63)) {
6295 				netdev_warn(tp->dev,
6296 					    "Start value (nsec) is over the limit; start must fit in 63 bits\n");
6297 				rval = -EINVAL;
6298 				goto err_out;
6299 			}
6300 
6301 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6302 			tw32(TG3_EAV_WATCHDOG0_MSB,
6303 			     TG3_EAV_WATCHDOG0_EN |
6304 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6305 
6306 			tw32(TG3_EAV_REF_CLCK_CTL,
6307 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6308 		} else {
6309 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6310 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6311 		}
6312 
6313 err_out:
6314 		tg3_full_unlock(tp);
6315 		return rval;
6316 
6317 	default:
6318 		break;
6319 	}
6320 
6321 	return -EOPNOTSUPP;
6322 }
6323 
6324 static const struct ptp_clock_info tg3_ptp_caps = {
6325 	.owner		= THIS_MODULE,
6326 	.name		= "tg3 clock",
6327 	.max_adj	= 250000000,
6328 	.n_alarm	= 0,
6329 	.n_ext_ts	= 0,
6330 	.n_per_out	= 1,
6331 	.n_pins		= 0,
6332 	.pps		= 0,
6333 	.adjfreq	= tg3_ptp_adjfreq,
6334 	.adjtime	= tg3_ptp_adjtime,
6335 	.gettimex64	= tg3_ptp_gettimex,
6336 	.settime64	= tg3_ptp_settime,
6337 	.enable		= tg3_ptp_enable,
6338 };
6339 
6340 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6341 				     struct skb_shared_hwtstamps *timestamp)
6342 {
6343 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6344 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6345 					   tp->ptp_adjust);
6346 }
6347 
6348 /* tp->lock must be held */
6349 static void tg3_ptp_init(struct tg3 *tp)
6350 {
6351 	if (!tg3_flag(tp, PTP_CAPABLE))
6352 		return;
6353 
6354 	/* Initialize the hardware clock to the system time. */
6355 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6356 	tp->ptp_adjust = 0;
6357 	tp->ptp_info = tg3_ptp_caps;
6358 }
6359 
6360 /* tp->lock must be held */
6361 static void tg3_ptp_resume(struct tg3 *tp)
6362 {
6363 	if (!tg3_flag(tp, PTP_CAPABLE))
6364 		return;
6365 
6366 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6367 	tp->ptp_adjust = 0;
6368 }
6369 
6370 static void tg3_ptp_fini(struct tg3 *tp)
6371 {
6372 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6373 		return;
6374 
6375 	ptp_clock_unregister(tp->ptp_clock);
6376 	tp->ptp_clock = NULL;
6377 	tp->ptp_adjust = 0;
6378 }
6379 
6380 static inline int tg3_irq_sync(struct tg3 *tp)
6381 {
6382 	return tp->irq_sync;
6383 }
6384 
6385 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6386 {
6387 	int i;
6388 
6389 	dst = (u32 *)((u8 *)dst + off);
6390 	for (i = 0; i < len; i += sizeof(u32))
6391 		*dst++ = tr32(off + i);
6392 }
6393 
6394 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6395 {
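	/* Snapshot each architectural register block; tg3_rd32_loop()
	 * stores each word at its natural offset within regs[].
	 */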
6396 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6397 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6398 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6399 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6400 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6401 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6402 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6403 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6404 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6405 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6406 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6407 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6408 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6409 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6410 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6411 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6412 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6413 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6414 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6415 
6416 	if (tg3_flag(tp, SUPPORT_MSIX))
6417 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6418 
6419 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6420 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6421 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6422 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6423 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6424 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6425 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6426 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6427 
6428 	if (!tg3_flag(tp, 5705_PLUS)) {
6429 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6430 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6431 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6432 	}
6433 
6434 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6435 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6436 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6437 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6438 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6439 
6440 	if (tg3_flag(tp, NVRAM))
6441 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6442 }
6443 
6444 static void tg3_dump_state(struct tg3 *tp)
6445 {
6446 	int i;
6447 	u32 *regs;
6448 
6449 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6450 	if (!regs)
6451 		return;
6452 
6453 	if (tg3_flag(tp, PCI_EXPRESS)) {
6454 		/* Read up to but not including private PCI registers */
6455 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6456 			regs[i / sizeof(u32)] = tr32(i);
6457 	} else
6458 		tg3_dump_legacy_regs(tp, regs);
6459 
6460 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6461 		if (!regs[i + 0] && !regs[i + 1] &&
6462 		    !regs[i + 2] && !regs[i + 3])
6463 			continue;
6464 
6465 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6466 			   i * 4,
6467 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6468 	}
6469 
6470 	kfree(regs);
6471 
6472 	for (i = 0; i < tp->irq_cnt; i++) {
6473 		struct tg3_napi *tnapi = &tp->napi[i];
6474 
6475 		/* SW status block */
6476 		netdev_err(tp->dev,
6477 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6478 			   i,
6479 			   tnapi->hw_status->status,
6480 			   tnapi->hw_status->status_tag,
6481 			   tnapi->hw_status->rx_jumbo_consumer,
6482 			   tnapi->hw_status->rx_consumer,
6483 			   tnapi->hw_status->rx_mini_consumer,
6484 			   tnapi->hw_status->idx[0].rx_producer,
6485 			   tnapi->hw_status->idx[0].tx_consumer);
6486 
6487 		netdev_err(tp->dev,
6488 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6489 			   i,
6490 			   tnapi->last_tag, tnapi->last_irq_tag,
6491 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6492 			   tnapi->rx_rcb_ptr,
6493 			   tnapi->prodring.rx_std_prod_idx,
6494 			   tnapi->prodring.rx_std_cons_idx,
6495 			   tnapi->prodring.rx_jmb_prod_idx,
6496 			   tnapi->prodring.rx_jmb_cons_idx);
6497 	}
6498 }
6499 
6500 /* This is called whenever we suspect that the system chipset is re-
6501  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6502  * is bogus tx completions. We try to recover by setting the
6503  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6504  * in the workqueue.
6505  */
6506 static void tg3_tx_recover(struct tg3 *tp)
6507 {
6508 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6509 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6510 
6511 	netdev_warn(tp->dev,
6512 		    "The system may be re-ordering memory-mapped I/O "
6513 		    "cycles to the network device, attempting to recover. "
6514 		    "Please report the problem to the driver maintainer "
6515 		    "and include system chipset information.\n");
6516 
6517 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6518 }
6519 
6520 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6521 {
6522 	/* Tell compiler to fetch tx indices from memory. */
6523 	barrier();
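	/* Ring occupancy is (tx_prod - tx_cons) modulo the ring size;
	 * what remains of tx_pending is the free descriptor count.
	 */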
6524 	return tnapi->tx_pending -
6525 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6526 }
6527 
6528 /* Tigon3 never reports partial packet sends.  So we do not
6529  * need special logic to handle SKBs that have not had all
6530  * of their frags sent yet, like SunGEM does.
6531  */
6532 static void tg3_tx(struct tg3_napi *tnapi)
6533 {
6534 	struct tg3 *tp = tnapi->tp;
6535 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6536 	u32 sw_idx = tnapi->tx_cons;
6537 	struct netdev_queue *txq;
6538 	int index = tnapi - tp->napi;
6539 	unsigned int pkts_compl = 0, bytes_compl = 0;
6540 
6541 	if (tg3_flag(tp, ENABLE_TSS))
6542 		index--;
6543 
6544 	txq = netdev_get_tx_queue(tp->dev, index);
6545 
6546 	while (sw_idx != hw_idx) {
6547 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6548 		struct sk_buff *skb = ri->skb;
6549 		int i, tx_bug = 0;
6550 
6551 		if (unlikely(skb == NULL)) {
6552 			tg3_tx_recover(tp);
6553 			return;
6554 		}
6555 
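		/* If the descriptor requested a hardware tx timestamp, read
		 * the latched 64-bit clock value and deliver it through
		 * skb_tstamp_tx().
		 */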
6556 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6557 			struct skb_shared_hwtstamps timestamp;
6558 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6559 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6560 
6561 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6562 
6563 			skb_tstamp_tx(skb, &timestamp);
6564 		}
6565 
6566 		dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6567 				 skb_headlen(skb), DMA_TO_DEVICE);
6568 
6569 		ri->skb = NULL;
6570 
6571 		while (ri->fragmented) {
6572 			ri->fragmented = false;
6573 			sw_idx = NEXT_TX(sw_idx);
6574 			ri = &tnapi->tx_buffers[sw_idx];
6575 		}
6576 
6577 		sw_idx = NEXT_TX(sw_idx);
6578 
6579 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6580 			ri = &tnapi->tx_buffers[sw_idx];
6581 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6582 				tx_bug = 1;
6583 
6584 			dma_unmap_page(&tp->pdev->dev,
6585 				       dma_unmap_addr(ri, mapping),
6586 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6587 				       DMA_TO_DEVICE);
6588 
6589 			while (ri->fragmented) {
6590 				ri->fragmented = false;
6591 				sw_idx = NEXT_TX(sw_idx);
6592 				ri = &tnapi->tx_buffers[sw_idx];
6593 			}
6594 
6595 			sw_idx = NEXT_TX(sw_idx);
6596 		}
6597 
6598 		pkts_compl++;
6599 		bytes_compl += skb->len;
6600 
6601 		dev_consume_skb_any(skb);
6602 
6603 		if (unlikely(tx_bug)) {
6604 			tg3_tx_recover(tp);
6605 			return;
6606 		}
6607 	}
6608 
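	/* Report completed packets/bytes so byte queue limits (BQL) can
	 * size the queue.
	 */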
6609 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6610 
6611 	tnapi->tx_cons = sw_idx;
6612 
6613 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6614 	 * before checking for netif_queue_stopped().  Without the
6615 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6616 	 * will miss it and cause the queue to be stopped forever.
6617 	 */
6618 	smp_mb();
6619 
6620 	if (unlikely(netif_tx_queue_stopped(txq) &&
6621 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6622 		__netif_tx_lock(txq, smp_processor_id());
6623 		if (netif_tx_queue_stopped(txq) &&
6624 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6625 			netif_tx_wake_queue(txq);
6626 		__netif_tx_unlock(txq);
6627 	}
6628 }
6629 
6630 static void tg3_frag_free(bool is_frag, void *data)
6631 {
6632 	if (is_frag)
6633 		skb_free_frag(data);
6634 	else
6635 		kfree(data);
6636 }
6637 
6638 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6639 {
6640 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6641 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6642 
6643 	if (!ri->data)
6644 		return;
6645 
6646 	dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6647 			 DMA_FROM_DEVICE);
6648 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6649 	ri->data = NULL;
6650 }
6651 
6652 
6653 /* Returns size of skb allocated or < 0 on error.
6654  *
6655  * We only need to fill in the address because the other members
6656  * of the RX descriptor are invariant, see tg3_init_rings.
6657  *
6658  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6659  * posting buffers we only dirty the first cache line of the RX
6660  * descriptor (containing the address).  Whereas for the RX status
6661  * buffers the cpu only reads the last cacheline of the RX descriptor
6662  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6663  */
6664 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6665 			     u32 opaque_key, u32 dest_idx_unmasked,
6666 			     unsigned int *frag_size)
6667 {
6668 	struct tg3_rx_buffer_desc *desc;
6669 	struct ring_info *map;
6670 	u8 *data;
6671 	dma_addr_t mapping;
6672 	int skb_size, data_size, dest_idx;
6673 
6674 	switch (opaque_key) {
6675 	case RXD_OPAQUE_RING_STD:
6676 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6677 		desc = &tpr->rx_std[dest_idx];
6678 		map = &tpr->rx_std_buffers[dest_idx];
6679 		data_size = tp->rx_pkt_map_sz;
6680 		break;
6681 
6682 	case RXD_OPAQUE_RING_JUMBO:
6683 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6684 		desc = &tpr->rx_jmb[dest_idx].std;
6685 		map = &tpr->rx_jmb_buffers[dest_idx];
6686 		data_size = TG3_RX_JMB_MAP_SZ;
6687 		break;
6688 
6689 	default:
6690 		return -EINVAL;
6691 	}
6692 
6693 	/* Do not overwrite any of the map or rp information
6694 	 * until we are sure we can commit to a new buffer.
6695 	 *
6696 	 * Callers depend upon this behavior and assume that
6697 	 * we leave everything unchanged if we fail.
6698 	 */
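	/* Size the buffer so build_skb() can wrap it later: aligned
	 * headroom + data, plus room for struct skb_shared_info.
	 */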
6699 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6700 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6701 	if (skb_size <= PAGE_SIZE) {
6702 		data = napi_alloc_frag(skb_size);
6703 		*frag_size = skb_size;
6704 	} else {
6705 		data = kmalloc(skb_size, GFP_ATOMIC);
6706 		*frag_size = 0;
6707 	}
6708 	if (!data)
6709 		return -ENOMEM;
6710 
6711 	mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6712 				 data_size, DMA_FROM_DEVICE);
6713 	if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6714 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6715 		return -EIO;
6716 	}
6717 
6718 	map->data = data;
6719 	dma_unmap_addr_set(map, mapping, mapping);
6720 
6721 	desc->addr_hi = ((u64)mapping >> 32);
6722 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6723 
6724 	return data_size;
6725 }
6726 
6727 /* We only need to move over in the address because the other
6728  * members of the RX descriptor are invariant.  See notes above
6729  * tg3_alloc_rx_data for full details.
6730  */
6731 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6732 			   struct tg3_rx_prodring_set *dpr,
6733 			   u32 opaque_key, int src_idx,
6734 			   u32 dest_idx_unmasked)
6735 {
6736 	struct tg3 *tp = tnapi->tp;
6737 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6738 	struct ring_info *src_map, *dest_map;
6739 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6740 	int dest_idx;
6741 
6742 	switch (opaque_key) {
6743 	case RXD_OPAQUE_RING_STD:
6744 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6745 		dest_desc = &dpr->rx_std[dest_idx];
6746 		dest_map = &dpr->rx_std_buffers[dest_idx];
6747 		src_desc = &spr->rx_std[src_idx];
6748 		src_map = &spr->rx_std_buffers[src_idx];
6749 		break;
6750 
6751 	case RXD_OPAQUE_RING_JUMBO:
6752 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6753 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6754 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6755 		src_desc = &spr->rx_jmb[src_idx].std;
6756 		src_map = &spr->rx_jmb_buffers[src_idx];
6757 		break;
6758 
6759 	default:
6760 		return;
6761 	}
6762 
6763 	dest_map->data = src_map->data;
6764 	dma_unmap_addr_set(dest_map, mapping,
6765 			   dma_unmap_addr(src_map, mapping));
6766 	dest_desc->addr_hi = src_desc->addr_hi;
6767 	dest_desc->addr_lo = src_desc->addr_lo;
6768 
6769 	/* Ensure that the update to the skb happens after the physical
6770 	 * addresses have been transferred to the new BD location.
6771 	 */
6772 	smp_wmb();
6773 
6774 	src_map->data = NULL;
6775 }
6776 
6777 /* The RX ring scheme is composed of multiple rings which post fresh
6778  * buffers to the chip, and one special ring the chip uses to report
6779  * status back to the host.
6780  *
6781  * The special ring reports the status of received packets to the
6782  * host.  The chip does not write into the original descriptor the
6783  * RX buffer was obtained from.  The chip simply takes the original
6784  * descriptor as provided by the host, updates the status and length
6785  * field, then writes this into the next status ring entry.
6786  *
6787  * Each ring the host uses to post buffers to the chip is described
6788  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6789  * it is first placed into the on-chip ram.  When the packet's length
6790  * is known, the chip walks down the TG3_BDINFO entries to select the
6791  * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
6792  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
6793  *
6794  * The "separate ring for rx status" scheme may sound queer, but it makes
6795  * sense from a cache coherency perspective.  If only the host writes
6796  * to the buffer post rings, and only the chip writes to the rx status
6797  * rings, then cache lines never move beyond shared-modified state.
6798  * If both the host and chip were to write into the same ring, cache line
6799  * eviction could occur since both entities want it in an exclusive state.
6800  */
6801 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6802 {
6803 	struct tg3 *tp = tnapi->tp;
6804 	u32 work_mask, rx_std_posted = 0;
6805 	u32 std_prod_idx, jmb_prod_idx;
6806 	u32 sw_idx = tnapi->rx_rcb_ptr;
6807 	u16 hw_idx;
6808 	int received;
6809 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6810 
6811 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6812 	/*
6813 	 * We need to order the read of hw_idx and the read of
6814 	 * the opaque cookie.
6815 	 */
6816 	rmb();
6817 	work_mask = 0;
6818 	received = 0;
6819 	std_prod_idx = tpr->rx_std_prod_idx;
6820 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6821 	while (sw_idx != hw_idx && budget > 0) {
6822 		struct ring_info *ri;
6823 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6824 		unsigned int len;
6825 		struct sk_buff *skb;
6826 		dma_addr_t dma_addr;
6827 		u32 opaque_key, desc_idx, *post_ptr;
6828 		u8 *data;
6829 		u64 tstamp = 0;
6830 
6831 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6832 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6833 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6834 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6835 			dma_addr = dma_unmap_addr(ri, mapping);
6836 			data = ri->data;
6837 			post_ptr = &std_prod_idx;
6838 			rx_std_posted++;
6839 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6840 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6841 			dma_addr = dma_unmap_addr(ri, mapping);
6842 			data = ri->data;
6843 			post_ptr = &jmb_prod_idx;
6844 		} else
6845 			goto next_pkt_nopost;
6846 
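		/* Remember which producer ring this buffer came from so the
		 * refill logic below only updates the mailboxes it needs to.
		 */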
6847 		work_mask |= opaque_key;
6848 
6849 		if (desc->err_vlan & RXD_ERR_MASK) {
6850 		drop_it:
6851 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6852 				       desc_idx, *post_ptr);
6853 		drop_it_no_recycle:
6854 			/* Other statistics are tracked by the card itself. */
6855 			tp->rx_dropped++;
6856 			goto next_pkt;
6857 		}
6858 
6859 		prefetch(data + TG3_RX_OFFSET(tp));
6860 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6861 		      ETH_FCS_LEN;
6862 
6863 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6864 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6865 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6866 		     RXD_FLAG_PTPSTAT_PTPV2) {
6867 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6868 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6869 		}
6870 
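		/* Large packets: hand the existing buffer to the stack and
		 * post a freshly allocated one.  Small packets: copy into a
		 * new skb and recycle the original buffer in place.
		 */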
6871 		if (len > TG3_RX_COPY_THRESH(tp)) {
6872 			int skb_size;
6873 			unsigned int frag_size;
6874 
6875 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6876 						    *post_ptr, &frag_size);
6877 			if (skb_size < 0)
6878 				goto drop_it;
6879 
6880 			dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6881 					 DMA_FROM_DEVICE);
6882 
6883 			/* Ensure that the update to the data happens
6884 			 * after the usage of the old DMA mapping.
6885 			 */
6886 			smp_wmb();
6887 
6888 			ri->data = NULL;
6889 
6890 			skb = build_skb(data, frag_size);
6891 			if (!skb) {
6892 				tg3_frag_free(frag_size != 0, data);
6893 				goto drop_it_no_recycle;
6894 			}
6895 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6896 		} else {
6897 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6898 				       desc_idx, *post_ptr);
6899 
6900 			skb = netdev_alloc_skb(tp->dev,
6901 					       len + TG3_RAW_IP_ALIGN);
6902 			if (skb == NULL)
6903 				goto drop_it_no_recycle;
6904 
6905 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6906 			dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6907 						DMA_FROM_DEVICE);
6908 			memcpy(skb->data,
6909 			       data + TG3_RX_OFFSET(tp),
6910 			       len);
6911 			dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6912 						   len, DMA_FROM_DEVICE);
6913 		}
6914 
6915 		skb_put(skb, len);
6916 		if (tstamp)
6917 			tg3_hwclock_to_timestamp(tp, tstamp,
6918 						 skb_hwtstamps(skb));
6919 
6920 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6921 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6922 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6923 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6924 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6925 		else
6926 			skb_checksum_none_assert(skb);
6927 
6928 		skb->protocol = eth_type_trans(skb, tp->dev);
6929 
6930 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6931 		    skb->protocol != htons(ETH_P_8021Q) &&
6932 		    skb->protocol != htons(ETH_P_8021AD)) {
6933 			dev_kfree_skb_any(skb);
6934 			goto drop_it_no_recycle;
6935 		}
6936 
6937 		if (desc->type_flags & RXD_FLAG_VLAN &&
6938 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6939 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6940 					       desc->err_vlan & RXD_VLAN_MASK);
6941 
6942 		napi_gro_receive(&tnapi->napi, skb);
6943 
6944 		received++;
6945 		budget--;
6946 
6947 next_pkt:
6948 		(*post_ptr)++;
6949 
6950 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6951 			tpr->rx_std_prod_idx = std_prod_idx &
6952 					       tp->rx_std_ring_mask;
6953 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6954 				     tpr->rx_std_prod_idx);
6955 			work_mask &= ~RXD_OPAQUE_RING_STD;
6956 			rx_std_posted = 0;
6957 		}
6958 next_pkt_nopost:
6959 		sw_idx++;
6960 		sw_idx &= tp->rx_ret_ring_mask;
6961 
6962 		/* Refresh hw_idx to see if there is new work */
6963 		if (sw_idx == hw_idx) {
6964 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6965 			rmb();
6966 		}
6967 	}
6968 
6969 	/* ACK the status ring. */
6970 	tnapi->rx_rcb_ptr = sw_idx;
6971 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6972 
6973 	/* Refill RX ring(s). */
6974 	if (!tg3_flag(tp, ENABLE_RSS)) {
6975 		/* Sync BD data before updating mailbox */
6976 		wmb();
6977 
6978 		if (work_mask & RXD_OPAQUE_RING_STD) {
6979 			tpr->rx_std_prod_idx = std_prod_idx &
6980 					       tp->rx_std_ring_mask;
6981 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6982 				     tpr->rx_std_prod_idx);
6983 		}
6984 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6985 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6986 					       tp->rx_jmb_ring_mask;
6987 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6988 				     tpr->rx_jmb_prod_idx);
6989 		}
6990 	} else if (work_mask) {
6991 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6992 		 * updated before the producer indices can be updated.
6993 		 */
6994 		smp_wmb();
6995 
6996 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6997 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6998 
6999 		if (tnapi != &tp->napi[1]) {
7000 			tp->rx_refill = true;
7001 			napi_schedule(&tp->napi[1].napi);
7002 		}
7003 	}
7004 
7005 	return received;
7006 }
7007 
7008 static void tg3_poll_link(struct tg3 *tp)
7009 {
7010 	/* handle link change and other phy events */
7011 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7012 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7013 
7014 		if (sblk->status & SD_STATUS_LINK_CHG) {
7015 			sblk->status = SD_STATUS_UPDATED |
7016 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7017 			spin_lock(&tp->lock);
7018 			if (tg3_flag(tp, USE_PHYLIB)) {
7019 				tw32_f(MAC_STATUS,
7020 				     (MAC_STATUS_SYNC_CHANGED |
7021 				      MAC_STATUS_CFG_CHANGED |
7022 				      MAC_STATUS_MI_COMPLETION |
7023 				      MAC_STATUS_LNKSTATE_CHANGED));
7024 				udelay(40);
7025 			} else
7026 				tg3_setup_phy(tp, false);
7027 			spin_unlock(&tp->lock);
7028 		}
7029 	}
7030 }
7031 
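/* With RSS, the per-vector rx paths post fresh buffers into their own
 * producer sets.  Move those buffers into the destination set (dpr,
 * napi[0]'s) that backs the hardware standard and jumbo rings.
 */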
7032 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7033 				struct tg3_rx_prodring_set *dpr,
7034 				struct tg3_rx_prodring_set *spr)
7035 {
7036 	u32 si, di, cpycnt, src_prod_idx;
7037 	int i, err = 0;
7038 
7039 	while (1) {
7040 		src_prod_idx = spr->rx_std_prod_idx;
7041 
7042 		/* Make sure updates to the rx_std_buffers[] entries and the
7043 		 * standard producer index are seen in the correct order.
7044 		 */
7045 		smp_rmb();
7046 
7047 		if (spr->rx_std_cons_idx == src_prod_idx)
7048 			break;
7049 
7050 		if (spr->rx_std_cons_idx < src_prod_idx)
7051 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7052 		else
7053 			cpycnt = tp->rx_std_ring_mask + 1 -
7054 				 spr->rx_std_cons_idx;
7055 
7056 		cpycnt = min(cpycnt,
7057 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7058 
7059 		si = spr->rx_std_cons_idx;
7060 		di = dpr->rx_std_prod_idx;
7061 
7062 		for (i = di; i < di + cpycnt; i++) {
7063 			if (dpr->rx_std_buffers[i].data) {
7064 				cpycnt = i - di;
7065 				err = -ENOSPC;
7066 				break;
7067 			}
7068 		}
7069 
7070 		if (!cpycnt)
7071 			break;
7072 
7073 		/* Ensure that updates to the rx_std_buffers ring and the
7074 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7075 		 * ordered correctly WRT the skb check above.
7076 		 */
7077 		smp_rmb();
7078 
7079 		memcpy(&dpr->rx_std_buffers[di],
7080 		       &spr->rx_std_buffers[si],
7081 		       cpycnt * sizeof(struct ring_info));
7082 
7083 		for (i = 0; i < cpycnt; i++, di++, si++) {
7084 			struct tg3_rx_buffer_desc *sbd, *dbd;
7085 			sbd = &spr->rx_std[si];
7086 			dbd = &dpr->rx_std[di];
7087 			dbd->addr_hi = sbd->addr_hi;
7088 			dbd->addr_lo = sbd->addr_lo;
7089 		}
7090 
7091 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7092 				       tp->rx_std_ring_mask;
7093 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7094 				       tp->rx_std_ring_mask;
7095 	}
7096 
7097 	while (1) {
7098 		src_prod_idx = spr->rx_jmb_prod_idx;
7099 
7100 		/* Make sure updates to the rx_jmb_buffers[] entries and
7101 		 * the jumbo producer index are seen in the correct order.
7102 		 */
7103 		smp_rmb();
7104 
7105 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7106 			break;
7107 
7108 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7109 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7110 		else
7111 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7112 				 spr->rx_jmb_cons_idx;
7113 
7114 		cpycnt = min(cpycnt,
7115 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7116 
7117 		si = spr->rx_jmb_cons_idx;
7118 		di = dpr->rx_jmb_prod_idx;
7119 
7120 		for (i = di; i < di + cpycnt; i++) {
7121 			if (dpr->rx_jmb_buffers[i].data) {
7122 				cpycnt = i - di;
7123 				err = -ENOSPC;
7124 				break;
7125 			}
7126 		}
7127 
7128 		if (!cpycnt)
7129 			break;
7130 
7131 		/* Ensure that updates to the rx_jmb_buffers ring and the
7132 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7133 		 * ordered correctly WRT the skb check above.
7134 		 */
7135 		smp_rmb();
7136 
7137 		memcpy(&dpr->rx_jmb_buffers[di],
7138 		       &spr->rx_jmb_buffers[si],
7139 		       cpycnt * sizeof(struct ring_info));
7140 
7141 		for (i = 0; i < cpycnt; i++, di++, si++) {
7142 			struct tg3_rx_buffer_desc *sbd, *dbd;
7143 			sbd = &spr->rx_jmb[si].std;
7144 			dbd = &dpr->rx_jmb[di].std;
7145 			dbd->addr_hi = sbd->addr_hi;
7146 			dbd->addr_lo = sbd->addr_lo;
7147 		}
7148 
7149 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7150 				       tp->rx_jmb_ring_mask;
7151 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7152 				       tp->rx_jmb_ring_mask;
7153 	}
7154 
7155 	return err;
7156 }
7157 
7158 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7159 {
7160 	struct tg3 *tp = tnapi->tp;
7161 
7162 	/* run TX completion thread */
7163 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7164 		tg3_tx(tnapi);
7165 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7166 			return work_done;
7167 	}
7168 
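	/* Vectors without an rx return ring (e.g. vector 0 when RSS is
	 * enabled) have no producer pointer, so there is no rx work here.
	 */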
7169 	if (!tnapi->rx_rcb_prod_idx)
7170 		return work_done;
7171 
7172 	/* run RX thread, within the bounds set by NAPI.
7173 	 * All RX "locking" is done by ensuring outside
7174 	 * code synchronizes with tg3->napi.poll()
7175 	 */
7176 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7177 		work_done += tg3_rx(tnapi, budget - work_done);
7178 
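	/* Under RSS, vector 1 also returns recycled buffers to the true
	 * hardware producer rings owned by vector 0 and kicks the rx
	 * mailboxes whenever any index moved.
	 */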
7179 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7180 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7181 		int i, err = 0;
7182 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7183 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7184 
7185 		tp->rx_refill = false;
7186 		for (i = 1; i <= tp->rxq_cnt; i++)
7187 			err |= tg3_rx_prodring_xfer(tp, dpr,
7188 						    &tp->napi[i].prodring);
7189 
7190 		wmb();
7191 
7192 		if (std_prod_idx != dpr->rx_std_prod_idx)
7193 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7194 				     dpr->rx_std_prod_idx);
7195 
7196 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7197 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7198 				     dpr->rx_jmb_prod_idx);
7199 
7200 		if (err)
7201 			tw32_f(HOSTCC_MODE, tp->coal_now);
7202 	}
7203 
7204 	return work_done;
7205 }
7206 
7207 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7208 {
7209 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7210 		schedule_work(&tp->reset_task);
7211 }
7212 
7213 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7214 {
7215 	if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7216 		cancel_work_sync(&tp->reset_task);
7217 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7218 }
7219 
7220 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7221 {
7222 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7223 	struct tg3 *tp = tnapi->tp;
7224 	int work_done = 0;
7225 	struct tg3_hw_status *sblk = tnapi->hw_status;
7226 
7227 	while (1) {
7228 		work_done = tg3_poll_work(tnapi, work_done, budget);
7229 
7230 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7231 			goto tx_recovery;
7232 
7233 		if (unlikely(work_done >= budget))
7234 			break;
7235 
7236 		/* tnapi->last_tag is used when re-enabling interrupts
7237 		 * below to tell the hw how much work has been processed,
7238 		 * so we must read it before checking for more work.
7239 		 */
7240 		tnapi->last_tag = sblk->status_tag;
7241 		tnapi->last_irq_tag = tnapi->last_tag;
7242 		rmb();
7243 
7244 		/* check for RX/TX work to do */
7245 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7246 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7247 
7248 			/* This test here is not race-free, but it will reduce
7249 			 * the number of interrupts by looping again.
7250 			 */
7251 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7252 				continue;
7253 
7254 			napi_complete_done(napi, work_done);
7255 			/* Reenable interrupts. */
7256 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7257 
7258 			/* This test here is synchronized by napi_schedule()
7259 			 * and napi_complete() to close the race condition.
7260 			 */
7261 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7262 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7263 						  HOSTCC_MODE_ENABLE |
7264 						  tnapi->coal_now);
7265 			}
7266 			break;
7267 		}
7268 	}
7269 
7270 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7271 	return work_done;
7272 
7273 tx_recovery:
7274 	/* work_done is guaranteed to be less than budget. */
7275 	napi_complete(napi);
7276 	tg3_reset_task_schedule(tp);
7277 	return work_done;
7278 }
7279 
7280 static void tg3_process_error(struct tg3 *tp)
7281 {
7282 	u32 val;
7283 	bool real_error = false;
7284 
7285 	if (tg3_flag(tp, ERROR_PROCESSED))
7286 		return;
7287 
7288 	/* Check Flow Attention register */
7289 	val = tr32(HOSTCC_FLOW_ATTN);
7290 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7291 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7292 		real_error = true;
7293 	}
7294 
7295 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7296 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7297 		real_error = true;
7298 	}
7299 
7300 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7301 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7302 		real_error = true;
7303 	}
7304 
7305 	if (!real_error)
7306 		return;
7307 
7308 	tg3_dump_state(tp);
7309 
7310 	tg3_flag_set(tp, ERROR_PROCESSED);
7311 	tg3_reset_task_schedule(tp);
7312 }
7313 
7314 static int tg3_poll(struct napi_struct *napi, int budget)
7315 {
7316 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7317 	struct tg3 *tp = tnapi->tp;
7318 	int work_done = 0;
7319 	struct tg3_hw_status *sblk = tnapi->hw_status;
7320 
7321 	while (1) {
7322 		if (sblk->status & SD_STATUS_ERROR)
7323 			tg3_process_error(tp);
7324 
7325 		tg3_poll_link(tp);
7326 
7327 		work_done = tg3_poll_work(tnapi, work_done, budget);
7328 
7329 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7330 			goto tx_recovery;
7331 
7332 		if (unlikely(work_done >= budget))
7333 			break;
7334 
7335 		if (tg3_flag(tp, TAGGED_STATUS)) {
7336 			/* tnapi->last_tag is used in tg3_int_reenable() below
7337 			 * to tell the hw how much work has been processed,
7338 			 * so we must read it before checking for more work.
7339 			 */
7340 			tnapi->last_tag = sblk->status_tag;
7341 			tnapi->last_irq_tag = tnapi->last_tag;
7342 			rmb();
7343 		} else
7344 			sblk->status &= ~SD_STATUS_UPDATED;
7345 
7346 		if (likely(!tg3_has_work(tnapi))) {
7347 			napi_complete_done(napi, work_done);
7348 			tg3_int_reenable(tnapi);
7349 			break;
7350 		}
7351 	}
7352 
7353 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7354 	return work_done;
7355 
7356 tx_recovery:
7357 	/* work_done is guaranteed to be less than budget. */
7358 	napi_complete(napi);
7359 	tg3_reset_task_schedule(tp);
7360 	return work_done;
7361 }
7362 
7363 static void tg3_napi_disable(struct tg3 *tp)
7364 {
7365 	int i;
7366 
7367 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7368 		napi_disable(&tp->napi[i].napi);
7369 }
7370 
7371 static void tg3_napi_enable(struct tg3 *tp)
7372 {
7373 	int i;
7374 
7375 	for (i = 0; i < tp->irq_cnt; i++)
7376 		napi_enable(&tp->napi[i].napi);
7377 }
7378 
7379 static void tg3_napi_init(struct tg3 *tp)
7380 {
7381 	int i;
7382 
7383 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7384 	for (i = 1; i < tp->irq_cnt; i++)
7385 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7386 }
7387 
7388 static void tg3_napi_fini(struct tg3 *tp)
7389 {
7390 	int i;
7391 
7392 	for (i = 0; i < tp->irq_cnt; i++)
7393 		netif_napi_del(&tp->napi[i].napi);
7394 }
7395 
7396 static inline void tg3_netif_stop(struct tg3 *tp)
7397 {
7398 	netif_trans_update(tp->dev);	/* prevent tx timeout */
7399 	tg3_napi_disable(tp);
7400 	netif_carrier_off(tp->dev);
7401 	netif_tx_disable(tp->dev);
7402 }
7403 
7404 /* tp->lock must be held */
7405 static inline void tg3_netif_start(struct tg3 *tp)
7406 {
7407 	tg3_ptp_resume(tp);
7408 
7409 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7410 	 * appropriate so long as all callers are assured to
7411 	 * have free tx slots (such as after tg3_init_hw)
7412 	 */
7413 	netif_tx_wake_all_queues(tp->dev);
7414 
7415 	if (tp->link_up)
7416 		netif_carrier_on(tp->dev);
7417 
7418 	tg3_napi_enable(tp);
7419 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7420 	tg3_enable_ints(tp);
7421 }
7422 
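/* Wait for all in-flight interrupt handlers to finish.  tp->lock is
 * dropped across the synchronize_irq() calls and re-taken afterwards,
 * which is what the __releases/__acquires annotations below document.
 */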
7423 static void tg3_irq_quiesce(struct tg3 *tp)
7424 	__releases(tp->lock)
7425 	__acquires(tp->lock)
7426 {
7427 	int i;
7428 
7429 	BUG_ON(tp->irq_sync);
7430 
7431 	tp->irq_sync = 1;
7432 	smp_mb();
7433 
7434 	spin_unlock_bh(&tp->lock);
7435 
7436 	for (i = 0; i < tp->irq_cnt; i++)
7437 		synchronize_irq(tp->napi[i].irq_vec);
7438 
7439 	spin_lock_bh(&tp->lock);
7440 }
7441 
7442 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7443  * If irq_sync is non-zero, then the IRQ handlers must be quiesced
7444  * as well.  Most of the time, this is not necessary except when
7445  * shutting down the device.
7446  */
7447 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7448 {
7449 	spin_lock_bh(&tp->lock);
7450 	if (irq_sync)
7451 		tg3_irq_quiesce(tp);
7452 }
7453 
7454 static inline void tg3_full_unlock(struct tg3 *tp)
7455 {
7456 	spin_unlock_bh(&tp->lock);
7457 }
7458 
7459 /* One-shot MSI handler - Chip automatically disables interrupt
7460  * after sending MSI so driver doesn't have to do it.
7461  */
7462 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7463 {
7464 	struct tg3_napi *tnapi = dev_id;
7465 	struct tg3 *tp = tnapi->tp;
7466 
7467 	prefetch(tnapi->hw_status);
7468 	if (tnapi->rx_rcb)
7469 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7470 
7471 	if (likely(!tg3_irq_sync(tp)))
7472 		napi_schedule(&tnapi->napi);
7473 
7474 	return IRQ_HANDLED;
7475 }
7476 
7477 /* MSI ISR - No need to check for interrupt sharing and no need to
7478  * flush status block and interrupt mailbox. PCI ordering rules
7479  * guarantee that MSI will arrive after the status block.
7480  */
7481 static irqreturn_t tg3_msi(int irq, void *dev_id)
7482 {
7483 	struct tg3_napi *tnapi = dev_id;
7484 	struct tg3 *tp = tnapi->tp;
7485 
7486 	prefetch(tnapi->hw_status);
7487 	if (tnapi->rx_rcb)
7488 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7489 	/*
7490 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7491 	 * chip-internal interrupt pending events.
7492 	 * Writing non-zero to intr-mbox-0 additionally tells the
7493 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7494 	 * event coalescing.
7495 	 */
7496 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7497 	if (likely(!tg3_irq_sync(tp)))
7498 		napi_schedule(&tnapi->napi);
7499 
7500 	return IRQ_RETVAL(1);
7501 }
7502 
7503 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7504 {
7505 	struct tg3_napi *tnapi = dev_id;
7506 	struct tg3 *tp = tnapi->tp;
7507 	struct tg3_hw_status *sblk = tnapi->hw_status;
7508 	unsigned int handled = 1;
7509 
7510 	/* In INTx mode, it is possible for the interrupt to arrive at
7511 	 * the CPU before the status block posted prior to it is visible.
7512 	 * Reading the PCI State register will confirm whether the
7513 	 * interrupt is ours and will flush the status block.
7514 	 */
7515 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7516 		if (tg3_flag(tp, CHIP_RESETTING) ||
7517 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7518 			handled = 0;
7519 			goto out;
7520 		}
7521 	}
7522 
7523 	/*
7524 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7525 	 * chip-internal interrupt pending events.
7526 	 * Writing non-zero to intr-mbox-0 additionally tells the
7527 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7528 	 * event coalescing.
7529 	 *
7530 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7531 	 * spurious interrupts.  The flush impacts performance but
7532 	 * excessive spurious interrupts can be worse in some cases.
7533 	 */
7534 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7535 	if (tg3_irq_sync(tp))
7536 		goto out;
7537 	sblk->status &= ~SD_STATUS_UPDATED;
7538 	if (likely(tg3_has_work(tnapi))) {
7539 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7540 		napi_schedule(&tnapi->napi);
7541 	} else {
7542 		/* No work; shared interrupt perhaps?  Re-enable
7543 		 * interrupts, and flush that PCI write.
7544 		 */
7545 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7546 			       0x00000000);
7547 	}
7548 out:
7549 	return IRQ_RETVAL(handled);
7550 }
7551 
7552 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7553 {
7554 	struct tg3_napi *tnapi = dev_id;
7555 	struct tg3 *tp = tnapi->tp;
7556 	struct tg3_hw_status *sblk = tnapi->hw_status;
7557 	unsigned int handled = 1;
7558 
7559 	/* In INTx mode, it is possible for the interrupt to arrive at
7560 	 * the CPU before the status block posted prior to it is visible.
7561 	 * Reading the PCI State register will confirm whether the
7562 	 * interrupt is ours and will flush the status block.
7563 	 */
7564 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7565 		if (tg3_flag(tp, CHIP_RESETTING) ||
7566 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7567 			handled = 0;
7568 			goto out;
7569 		}
7570 	}
7571 
7572 	/*
7573 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7574 	 * chip-internal interrupt pending events.
7575 	 * Writing non-zero to intr-mbox-0 additionally tells the
7576 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7577 	 * event coalescing.
7578 	 *
7579 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7580 	 * spurious interrupts.  The flush impacts performance but
7581 	 * excessive spurious interrupts can be worse in some cases.
7582 	 */
7583 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7584 
7585 	/*
7586 	 * In a shared interrupt configuration, sometimes other devices'
7587 	 * interrupts will scream.  We record the current status tag here
7588 	 * so that the above check can report that the screaming interrupts
7589 	 * are unhandled.  Eventually they will be silenced.
7590 	 */
7591 	tnapi->last_irq_tag = sblk->status_tag;
7592 
7593 	if (tg3_irq_sync(tp))
7594 		goto out;
7595 
7596 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7597 
7598 	napi_schedule(&tnapi->napi);
7599 
7600 out:
7601 	return IRQ_RETVAL(handled);
7602 }
7603 
7604 /* ISR for interrupt test */
7605 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7606 {
7607 	struct tg3_napi *tnapi = dev_id;
7608 	struct tg3 *tp = tnapi->tp;
7609 	struct tg3_hw_status *sblk = tnapi->hw_status;
7610 
7611 	if ((sblk->status & SD_STATUS_UPDATED) ||
7612 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7613 		tg3_disable_ints(tp);
7614 		return IRQ_RETVAL(1);
7615 	}
7616 	return IRQ_RETVAL(0);
7617 }
7618 
7619 #ifdef CONFIG_NET_POLL_CONTROLLER
7620 static void tg3_poll_controller(struct net_device *dev)
7621 {
7622 	int i;
7623 	struct tg3 *tp = netdev_priv(dev);
7624 
7625 	if (tg3_irq_sync(tp))
7626 		return;
7627 
7628 	for (i = 0; i < tp->irq_cnt; i++)
7629 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7630 }
7631 #endif
7632 
7633 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7634 {
7635 	struct tg3 *tp = netdev_priv(dev);
7636 
7637 	if (netif_msg_tx_err(tp)) {
7638 		netdev_err(dev, "transmit timed out, resetting\n");
7639 		tg3_dump_state(tp);
7640 	}
7641 
7642 	tg3_reset_task_schedule(tp);
7643 }
7644 
7645 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
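/* Worked example: mapping = 0xfffff000 and len = 0x1000 gives
 * 0xfffff000 + 0x1000 + 8 = 0x8 in 32-bit arithmetic, which is < base,
 * so the test fires and the buffer must be bounced.  (The 8 bytes of
 * slack look like a safety margin; this file does not say why.)
 */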
7646 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7647 {
7648 	u32 base = (u32) mapping & 0xffffffff;
7649 
7650 	return base + len + 8 < base;
7651 }
7652 
7653 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7654  * of any 4GB boundaries: 4G, 8G, etc.
7655  */
7656 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7657 					   u32 len, u32 mss)
7658 {
7659 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7660 		u32 base = (u32) mapping & 0xffffffff;
7661 
7662 		return ((base + len + (mss & 0x3fff)) < base);
7663 	}
7664 	return 0;
7665 }
7666 
7667 /* Test for DMA addresses > 40-bit */
7668 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7669 					  int len)
7670 {
7671 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7672 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7673 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7674 	return 0;
7675 #else
7676 	return 0;
7677 #endif
7678 }
7679 
7680 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7681 				 dma_addr_t mapping, u32 len, u32 flags,
7682 				 u32 mss, u32 vlan)
7683 {
7684 	txbd->addr_hi = ((u64) mapping >> 32);
7685 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7686 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7687 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7688 }
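/* For example, mapping 0x123456780 with len 0x40 yields addr_hi = 0x1,
 * addr_lo = 0x23456780, and len_flags = (0x40 << TXD_LEN_SHIFT) | flags,
 * i.e. the length in the upper half and the flag bits in the lower half.
 */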
7689 
7690 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7691 			    dma_addr_t map, u32 len, u32 flags,
7692 			    u32 mss, u32 vlan)
7693 {
7694 	struct tg3 *tp = tnapi->tp;
7695 	bool hwbug = false;
7696 
7697 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7698 		hwbug = true;
7699 
7700 	if (tg3_4g_overflow_test(map, len))
7701 		hwbug = true;
7702 
7703 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7704 		hwbug = true;
7705 
7706 	if (tg3_40bit_overflow_test(tp, map, len))
7707 		hwbug = true;
7708 
7709 	if (tp->dma_limit) {
7710 		u32 prvidx = *entry;
7711 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7712 		while (len > tp->dma_limit && *budget) {
7713 			u32 frag_len = tp->dma_limit;
7714 			len -= tp->dma_limit;
7715 
7716 			/* Avoid the 8-byte DMA problem */
7717 			if (len <= 8) {
7718 				len += tp->dma_limit / 2;
7719 				frag_len = tp->dma_limit / 2;
7720 			}
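			/* Splitting at dma_limit/2 keeps both the emitted
			 * fragment and the remainder above 8 bytes, so
			 * neither piece re-triggers the short-DMA bug.
			 */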
7721 
7722 			tnapi->tx_buffers[*entry].fragmented = true;
7723 
7724 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7725 				      frag_len, tmp_flag, mss, vlan);
7726 			*budget -= 1;
7727 			prvidx = *entry;
7728 			*entry = NEXT_TX(*entry);
7729 
7730 			map += frag_len;
7731 		}
7732 
7733 		if (len) {
7734 			if (*budget) {
7735 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7736 					      len, flags, mss, vlan);
7737 				*budget -= 1;
7738 				*entry = NEXT_TX(*entry);
7739 			} else {
7740 				hwbug = true;
7741 				tnapi->tx_buffers[prvidx].fragmented = false;
7742 			}
7743 		}
7744 	} else {
7745 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7746 			      len, flags, mss, vlan);
7747 		*entry = NEXT_TX(*entry);
7748 	}
7749 
7750 	return hwbug;
7751 }
7752 
7753 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7754 {
7755 	int i;
7756 	struct sk_buff *skb;
7757 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7758 
7759 	skb = txb->skb;
7760 	txb->skb = NULL;
7761 
7762 	dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7763 			 skb_headlen(skb), DMA_TO_DEVICE);
7764 
7765 	while (txb->fragmented) {
7766 		txb->fragmented = false;
7767 		entry = NEXT_TX(entry);
7768 		txb = &tnapi->tx_buffers[entry];
7769 	}
7770 
7771 	for (i = 0; i <= last; i++) {
7772 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7773 
7774 		entry = NEXT_TX(entry);
7775 		txb = &tnapi->tx_buffers[entry];
7776 
7777 		dma_unmap_page(&tnapi->tp->pdev->dev,
7778 			       dma_unmap_addr(txb, mapping),
7779 			       skb_frag_size(frag), DMA_TO_DEVICE);
7780 
7781 		while (txb->fragmented) {
7782 			txb->fragmented = false;
7783 			entry = NEXT_TX(entry);
7784 			txb = &tnapi->tx_buffers[entry];
7785 		}
7786 	}
7787 }
7788 
7789 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7790 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7791 				       struct sk_buff **pskb,
7792 				       u32 *entry, u32 *budget,
7793 				       u32 base_flags, u32 mss, u32 vlan)
7794 {
7795 	struct tg3 *tp = tnapi->tp;
7796 	struct sk_buff *new_skb, *skb = *pskb;
7797 	dma_addr_t new_addr = 0;
7798 	int ret = 0;
7799 
7800 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7801 		new_skb = skb_copy(skb, GFP_ATOMIC);
7802 	else {
7803 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7804 
7805 		new_skb = skb_copy_expand(skb,
7806 					  skb_headroom(skb) + more_headroom,
7807 					  skb_tailroom(skb), GFP_ATOMIC);
7808 	}
7809 
7810 	if (!new_skb) {
7811 		ret = -1;
7812 	} else {
7813 		/* New SKB is guaranteed to be linear. */
7814 		new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7815 					  new_skb->len, DMA_TO_DEVICE);
7816 		/* Make sure the mapping succeeded */
7817 		if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7818 			dev_kfree_skb_any(new_skb);
7819 			ret = -1;
7820 		} else {
7821 			u32 save_entry = *entry;
7822 
7823 			base_flags |= TXD_FLAG_END;
7824 
7825 			tnapi->tx_buffers[*entry].skb = new_skb;
7826 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7827 					   mapping, new_addr);
7828 
7829 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7830 					    new_skb->len, base_flags,
7831 					    mss, vlan)) {
7832 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7833 				dev_kfree_skb_any(new_skb);
7834 				ret = -1;
7835 			}
7836 		}
7837 	}
7838 
7839 	dev_consume_skb_any(skb);
7840 	*pskb = new_skb;
7841 	return ret;
7842 }
7843 
7844 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7845 {
7846 	/* Check that the ring can ever hold this many segments,
7847 	 * as gso_segs can be more than the current ring size
7848 	 */
7849 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7850 }
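/* e.g. with a tx_pending of 511, any skb with gso_segs >= 170 is refused
 * here, since the 3-descriptors-per-segment estimate in tg3_tso_bug()
 * could then never fit in the ring.
 */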
7851 
7852 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7853 
7854 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7855  * indicated in tg3_tx_frag_set()
7856  */
7857 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7858 		       struct netdev_queue *txq, struct sk_buff *skb)
7859 {
7860 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7861 	struct sk_buff *segs, *seg, *next;
7862 
7863 	/* Estimate the number of fragments in the worst case */
7864 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7865 		netif_tx_stop_queue(txq);
7866 
7867 		/* netif_tx_stop_queue() must be done before checking
7868 		 * tx index in tg3_tx_avail() below, because in
7869 		 * tg3_tx(), we update tx index before checking for
7870 		 * netif_tx_queue_stopped().
7871 		 */
7872 		smp_mb();
7873 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7874 			return NETDEV_TX_BUSY;
7875 
7876 		netif_tx_wake_queue(txq);
7877 	}
7878 
7879 	segs = skb_gso_segment(skb, tp->dev->features &
7880 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7881 	if (IS_ERR(segs) || !segs)
7882 		goto tg3_tso_bug_end;
7883 
7884 	skb_list_walk_safe(segs, seg, next) {
7885 		skb_mark_not_on_list(seg);
7886 		tg3_start_xmit(seg, tp->dev);
7887 	}
7888 
7889 tg3_tso_bug_end:
7890 	dev_consume_skb_any(skb);
7891 
7892 	return NETDEV_TX_OK;
7893 }
7894 
7895 /* hard_start_xmit for all devices */
7896 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7897 {
7898 	struct tg3 *tp = netdev_priv(dev);
7899 	u32 len, entry, base_flags, mss, vlan = 0;
7900 	u32 budget;
7901 	int i = -1, would_hit_hwbug;
7902 	dma_addr_t mapping;
7903 	struct tg3_napi *tnapi;
7904 	struct netdev_queue *txq;
7905 	unsigned int last;
7906 	struct iphdr *iph = NULL;
7907 	struct tcphdr *tcph = NULL;
7908 	__sum16 tcp_csum = 0, ip_csum = 0;
7909 	__be16 ip_tot_len = 0;
7910 
7911 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7912 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7913 	if (tg3_flag(tp, ENABLE_TSS))
7914 		tnapi++;
7915 
7916 	budget = tg3_tx_avail(tnapi);
7917 
7918 	/* We are running in BH disabled context with netif_tx_lock
7919 	 * and TX reclaim runs via tp->napi.poll inside of a software
7920 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7921 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7922 	 */
7923 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7924 		if (!netif_tx_queue_stopped(txq)) {
7925 			netif_tx_stop_queue(txq);
7926 
7927 			/* This is a hard error, log it. */
7928 			netdev_err(dev,
7929 				   "BUG! Tx Ring full when queue awake!\n");
7930 		}
7931 		return NETDEV_TX_BUSY;
7932 	}
7933 
7934 	entry = tnapi->tx_prod;
7935 	base_flags = 0;
7936 
7937 	mss = skb_shinfo(skb)->gso_size;
7938 	if (mss) {
7939 		u32 tcp_opt_len, hdr_len;
7940 
7941 		if (skb_cow_head(skb, 0))
7942 			goto drop;
7943 
7944 		iph = ip_hdr(skb);
7945 		tcp_opt_len = tcp_optlen(skb);
7946 
7947 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7948 
7949 		/* HW/FW cannot correctly segment packets that have been
7950 		 * vlan encapsulated.
7951 		 */
7952 		if (skb->protocol == htons(ETH_P_8021Q) ||
7953 		    skb->protocol == htons(ETH_P_8021AD)) {
7954 			if (tg3_tso_bug_gso_check(tnapi, skb))
7955 				return tg3_tso_bug(tp, tnapi, txq, skb);
7956 			goto drop;
7957 		}
7958 
7959 		if (!skb_is_gso_v6(skb)) {
7960 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7961 			    tg3_flag(tp, TSO_BUG)) {
7962 				if (tg3_tso_bug_gso_check(tnapi, skb))
7963 					return tg3_tso_bug(tp, tnapi, txq, skb);
7964 				goto drop;
7965 			}
7966 			ip_csum = iph->check;
7967 			ip_tot_len = iph->tot_len;
7968 			iph->check = 0;
7969 			iph->tot_len = htons(mss + hdr_len);
7970 		}
7971 
7972 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7973 			       TXD_FLAG_CPU_POST_DMA);
7974 
7975 		tcph = tcp_hdr(skb);
7976 		tcp_csum = tcph->check;
7977 
7978 		if (tg3_flag(tp, HW_TSO_1) ||
7979 		    tg3_flag(tp, HW_TSO_2) ||
7980 		    tg3_flag(tp, HW_TSO_3)) {
7981 			tcph->check = 0;
7982 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7983 		} else {
7984 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7985 							 0, IPPROTO_TCP, 0);
7986 		}
7987 
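		/* Each hardware TSO generation wants the header length
		 * folded into the descriptor differently; the bit shuffling
		 * below packs hdr_len into the mss field and/or base_flags
		 * in the layout that generation expects.
		 */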
7988 		if (tg3_flag(tp, HW_TSO_3)) {
7989 			mss |= (hdr_len & 0xc) << 12;
7990 			if (hdr_len & 0x10)
7991 				base_flags |= 0x00000010;
7992 			base_flags |= (hdr_len & 0x3e0) << 5;
7993 		} else if (tg3_flag(tp, HW_TSO_2))
7994 			mss |= hdr_len << 9;
7995 		else if (tg3_flag(tp, HW_TSO_1) ||
7996 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
7997 			if (tcp_opt_len || iph->ihl > 5) {
7998 				int tsflags;
7999 
8000 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8001 				mss |= (tsflags << 11);
8002 			}
8003 		} else {
8004 			if (tcp_opt_len || iph->ihl > 5) {
8005 				int tsflags;
8006 
8007 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8008 				base_flags |= tsflags << 12;
8009 			}
8010 		}
8011 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8012 		/* HW/FW cannot correctly checksum packets that have been
8013 		 * vlan encapsulated.
8014 		 */
8015 		if (skb->protocol == htons(ETH_P_8021Q) ||
8016 		    skb->protocol == htons(ETH_P_8021AD)) {
8017 			if (skb_checksum_help(skb))
8018 				goto drop;
8019 		} else  {
8020 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8021 		}
8022 	}
8023 
8024 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8025 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8026 		base_flags |= TXD_FLAG_JMB_PKT;
8027 
8028 	if (skb_vlan_tag_present(skb)) {
8029 		base_flags |= TXD_FLAG_VLAN;
8030 		vlan = skb_vlan_tag_get(skb);
8031 	}
8032 
8033 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8034 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8035 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8036 		base_flags |= TXD_FLAG_HWTSTAMP;
8037 	}
8038 
8039 	len = skb_headlen(skb);
8040 
8041 	mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8042 				 DMA_TO_DEVICE);
8043 	if (dma_mapping_error(&tp->pdev->dev, mapping))
8044 		goto drop;
8045 
8047 	tnapi->tx_buffers[entry].skb = skb;
8048 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8049 
8050 	would_hit_hwbug = 0;
8051 
8052 	if (tg3_flag(tp, 5701_DMA_BUG))
8053 		would_hit_hwbug = 1;
8054 
8055 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8056 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8057 			    mss, vlan)) {
8058 		would_hit_hwbug = 1;
8059 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8060 		u32 tmp_mss = mss;
8061 
8062 		if (!tg3_flag(tp, HW_TSO_1) &&
8063 		    !tg3_flag(tp, HW_TSO_2) &&
8064 		    !tg3_flag(tp, HW_TSO_3))
8065 			tmp_mss = 0;
8066 
8067 		/* Now loop through additional data
8068 		 * fragments, and queue them.
8069 		 */
8070 		last = skb_shinfo(skb)->nr_frags - 1;
8071 		for (i = 0; i <= last; i++) {
8072 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8073 
8074 			len = skb_frag_size(frag);
8075 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8076 						   len, DMA_TO_DEVICE);
8077 
8078 			tnapi->tx_buffers[entry].skb = NULL;
8079 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8080 					   mapping);
8081 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8082 				goto dma_error;
8083 
8084 			if (!budget ||
8085 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8086 					    len, base_flags |
8087 					    ((i == last) ? TXD_FLAG_END : 0),
8088 					    tmp_mss, vlan)) {
8089 				would_hit_hwbug = 1;
8090 				break;
8091 			}
8092 		}
8093 	}
8094 
8095 	if (would_hit_hwbug) {
8096 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8097 
8098 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8099 			/* If it's a TSO packet, do GSO instead of
8100 			 * allocating and copying to a large linear SKB
8101 			 */
8102 			if (ip_tot_len) {
8103 				iph->check = ip_csum;
8104 				iph->tot_len = ip_tot_len;
8105 			}
8106 			tcph->check = tcp_csum;
8107 			return tg3_tso_bug(tp, tnapi, txq, skb);
8108 		}
8109 
8110 		/* If the workaround fails due to memory/mapping
8111 		 * failure, silently drop this packet.
8112 		 */
8113 		entry = tnapi->tx_prod;
8114 		budget = tg3_tx_avail(tnapi);
8115 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8116 						base_flags, mss, vlan))
8117 			goto drop_nofree;
8118 	}
8119 
8120 	skb_tx_timestamp(skb);
8121 	netdev_tx_sent_queue(txq, skb->len);
8122 
8123 	/* Sync BD data before updating mailbox */
8124 	wmb();
8125 
8126 	tnapi->tx_prod = entry;
8127 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8128 		netif_tx_stop_queue(txq);
8129 
8130 		/* netif_tx_stop_queue() must be done before checking
8131 		 * tx index in tg3_tx_avail() below, because in
8132 		 * tg3_tx(), we update tx index before checking for
8133 		 * netif_tx_queue_stopped().
8134 		 */
8135 		smp_mb();
8136 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8137 			netif_tx_wake_queue(txq);
8138 	}
8139 
8140 	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8141 		/* Packets are ready, update Tx producer idx on card. */
8142 		tw32_tx_mbox(tnapi->prodmbox, entry);
8143 	}
8144 
8145 	return NETDEV_TX_OK;
8146 
8147 dma_error:
8148 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8149 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8150 drop:
8151 	dev_kfree_skb_any(skb);
8152 drop_nofree:
8153 	tp->tx_dropped++;
8154 	return NETDEV_TX_OK;
8155 }
8156 
8157 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8158 {
8159 	if (enable) {
8160 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8161 				  MAC_MODE_PORT_MODE_MASK);
8162 
8163 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8164 
8165 		if (!tg3_flag(tp, 5705_PLUS))
8166 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8167 
8168 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8169 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8170 		else
8171 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8172 	} else {
8173 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8174 
8175 		if (tg3_flag(tp, 5705_PLUS) ||
8176 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8177 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8178 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8179 	}
8180 
8181 	tw32(MAC_MODE, tp->mac_mode);
8182 	udelay(40);
8183 }
8184 
8185 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8186 {
8187 	u32 val, bmcr, mac_mode, ptest = 0;
8188 
8189 	tg3_phy_toggle_apd(tp, false);
8190 	tg3_phy_toggle_automdix(tp, false);
8191 
8192 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8193 		return -EIO;
8194 
8195 	bmcr = BMCR_FULLDPLX;
8196 	switch (speed) {
8197 	case SPEED_10:
8198 		break;
8199 	case SPEED_100:
8200 		bmcr |= BMCR_SPEED100;
8201 		break;
8202 	case SPEED_1000:
8203 	default:
8204 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8205 			speed = SPEED_100;
8206 			bmcr |= BMCR_SPEED100;
8207 		} else {
8208 			speed = SPEED_1000;
8209 			bmcr |= BMCR_SPEED1000;
8210 		}
8211 	}
8212 
8213 	if (extlpbk) {
8214 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8215 			tg3_readphy(tp, MII_CTRL1000, &val);
8216 			val |= CTL1000_AS_MASTER |
8217 			       CTL1000_ENABLE_MASTER;
8218 			tg3_writephy(tp, MII_CTRL1000, val);
8219 		} else {
8220 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8221 				MII_TG3_FET_PTEST_TRIM_2;
8222 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8223 		}
8224 	} else
8225 		bmcr |= BMCR_LOOPBACK;
8226 
8227 	tg3_writephy(tp, MII_BMCR, bmcr);
8228 
8229 	/* The write needs to be flushed for the FETs */
8230 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8231 		tg3_readphy(tp, MII_BMCR, &bmcr);
8232 
8233 	udelay(40);
8234 
8235 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8236 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8237 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8238 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8239 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8240 
8241 		/* The write needs to be flushed for the AC131 */
8242 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8243 	}
8244 
8245 	/* Reset to prevent losing 1st rx packet intermittently */
8246 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8247 	    tg3_flag(tp, 5780_CLASS)) {
8248 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8249 		udelay(10);
8250 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8251 	}
8252 
8253 	mac_mode = tp->mac_mode &
8254 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8255 	if (speed == SPEED_1000)
8256 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8257 	else
8258 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8259 
8260 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8261 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8262 
8263 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8264 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8265 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8266 			mac_mode |= MAC_MODE_LINK_POLARITY;
8267 
8268 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8269 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8270 	}
8271 
8272 	tw32(MAC_MODE, mac_mode);
8273 	udelay(40);
8274 
8275 	return 0;
8276 }
8277 
8278 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8279 {
8280 	struct tg3 *tp = netdev_priv(dev);
8281 
8282 	if (features & NETIF_F_LOOPBACK) {
8283 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8284 			return;
8285 
8286 		spin_lock_bh(&tp->lock);
8287 		tg3_mac_loopback(tp, true);
8288 		netif_carrier_on(tp->dev);
8289 		spin_unlock_bh(&tp->lock);
8290 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8291 	} else {
8292 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8293 			return;
8294 
8295 		spin_lock_bh(&tp->lock);
8296 		tg3_mac_loopback(tp, false);
8297 		/* Force link status check */
8298 		tg3_setup_phy(tp, true);
8299 		spin_unlock_bh(&tp->lock);
8300 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8301 	}
8302 }
8303 
8304 static netdev_features_t tg3_fix_features(struct net_device *dev,
8305 	netdev_features_t features)
8306 {
8307 	struct tg3 *tp = netdev_priv(dev);
8308 
8309 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8310 		features &= ~NETIF_F_ALL_TSO;
8311 
8312 	return features;
8313 }
8314 
8315 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8316 {
8317 	netdev_features_t changed = dev->features ^ features;
8318 
8319 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8320 		tg3_set_loopback(dev, features);
8321 
8322 	return 0;
8323 }
8324 
8325 static void tg3_rx_prodring_free(struct tg3 *tp,
8326 				 struct tg3_rx_prodring_set *tpr)
8327 {
8328 	int i;
8329 
8330 	if (tpr != &tp->napi[0].prodring) {
8331 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8332 		     i = (i + 1) & tp->rx_std_ring_mask)
8333 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8334 					tp->rx_pkt_map_sz);
8335 
8336 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8337 			for (i = tpr->rx_jmb_cons_idx;
8338 			     i != tpr->rx_jmb_prod_idx;
8339 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8340 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8341 						TG3_RX_JMB_MAP_SZ);
8342 			}
8343 		}
8344 
8345 		return;
8346 	}
8347 
8348 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8349 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8350 				tp->rx_pkt_map_sz);
8351 
8352 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8353 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8354 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8355 					TG3_RX_JMB_MAP_SZ);
8356 	}
8357 }
8358 
8359 /* Initialize rx rings for packet processing.
8360  *
8361  * The chip has been shut down and the driver detached from
8362  * the network stack, so no interrupts or new tx packets will
8363  * end up in the driver.  tp->{tx,}lock are held and thus
8364  * we may not sleep.
8365  */
8366 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8367 				 struct tg3_rx_prodring_set *tpr)
8368 {
8369 	u32 i, rx_pkt_dma_sz;
8370 
8371 	tpr->rx_std_cons_idx = 0;
8372 	tpr->rx_std_prod_idx = 0;
8373 	tpr->rx_jmb_cons_idx = 0;
8374 	tpr->rx_jmb_prod_idx = 0;
8375 
8376 	if (tpr != &tp->napi[0].prodring) {
8377 		memset(&tpr->rx_std_buffers[0], 0,
8378 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8379 		if (tpr->rx_jmb_buffers)
8380 			memset(&tpr->rx_jmb_buffers[0], 0,
8381 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8382 		goto done;
8383 	}
8384 
8385 	/* Zero out all descriptors. */
8386 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8387 
8388 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8389 	if (tg3_flag(tp, 5780_CLASS) &&
8390 	    tp->dev->mtu > ETH_DATA_LEN)
8391 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8392 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8393 
8394 	/* Initialize invariants of the rings; we only set this
8395 	 * stuff once.  This works because the card does not
8396 	 * write into the rx buffer posting rings.
8397 	 */
8398 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8399 		struct tg3_rx_buffer_desc *rxd;
8400 
8401 		rxd = &tpr->rx_std[i];
8402 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8403 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8404 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8405 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8406 	}
8407 
8408 	/* Now allocate fresh SKBs for each rx ring. */
8409 	for (i = 0; i < tp->rx_pending; i++) {
8410 		unsigned int frag_size;
8411 
8412 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8413 				      &frag_size) < 0) {
8414 			netdev_warn(tp->dev,
8415 				    "Using a smaller RX standard ring. Only "
8416 				    "%d out of %d buffers were allocated "
8417 				    "successfully\n", i, tp->rx_pending);
8418 			if (i == 0)
8419 				goto initfail;
8420 			tp->rx_pending = i;
8421 			break;
8422 		}
8423 	}
8424 
8425 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8426 		goto done;
8427 
8428 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8429 
8430 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8431 		goto done;
8432 
8433 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8434 		struct tg3_rx_buffer_desc *rxd;
8435 
8436 		rxd = &tpr->rx_jmb[i].std;
8437 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8438 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8439 				  RXD_FLAG_JUMBO;
8440 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8441 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8442 	}
8443 
8444 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8445 		unsigned int frag_size;
8446 
8447 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8448 				      &frag_size) < 0) {
8449 			netdev_warn(tp->dev,
8450 				    "Using a smaller RX jumbo ring. Only %d "
8451 				    "out of %d buffers were allocated "
8452 				    "successfully\n", i, tp->rx_jumbo_pending);
8453 			if (i == 0)
8454 				goto initfail;
8455 			tp->rx_jumbo_pending = i;
8456 			break;
8457 		}
8458 	}
8459 
8460 done:
8461 	return 0;
8462 
8463 initfail:
8464 	tg3_rx_prodring_free(tp, tpr);
8465 	return -ENOMEM;
8466 }
8467 
8468 static void tg3_rx_prodring_fini(struct tg3 *tp,
8469 				 struct tg3_rx_prodring_set *tpr)
8470 {
8471 	kfree(tpr->rx_std_buffers);
8472 	tpr->rx_std_buffers = NULL;
8473 	kfree(tpr->rx_jmb_buffers);
8474 	tpr->rx_jmb_buffers = NULL;
8475 	if (tpr->rx_std) {
8476 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8477 				  tpr->rx_std, tpr->rx_std_mapping);
8478 		tpr->rx_std = NULL;
8479 	}
8480 	if (tpr->rx_jmb) {
8481 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8482 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8483 		tpr->rx_jmb = NULL;
8484 	}
8485 }
8486 
8487 static int tg3_rx_prodring_init(struct tg3 *tp,
8488 				struct tg3_rx_prodring_set *tpr)
8489 {
8490 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8491 				      GFP_KERNEL);
8492 	if (!tpr->rx_std_buffers)
8493 		return -ENOMEM;
8494 
8495 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8496 					 TG3_RX_STD_RING_BYTES(tp),
8497 					 &tpr->rx_std_mapping,
8498 					 GFP_KERNEL);
8499 	if (!tpr->rx_std)
8500 		goto err_out;
8501 
8502 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8503 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8504 					      GFP_KERNEL);
8505 		if (!tpr->rx_jmb_buffers)
8506 			goto err_out;
8507 
8508 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8509 						 TG3_RX_JMB_RING_BYTES(tp),
8510 						 &tpr->rx_jmb_mapping,
8511 						 GFP_KERNEL);
8512 		if (!tpr->rx_jmb)
8513 			goto err_out;
8514 	}
8515 
8516 	return 0;
8517 
8518 err_out:
8519 	tg3_rx_prodring_fini(tp, tpr);
8520 	return -ENOMEM;
8521 }
8522 
8523 /* Free up pending packets in all rx/tx rings.
8524  *
8525  * The chip has been shut down and the driver detached from
8526  * the network stack, so no interrupts or new tx packets will
8527  * end up in the driver.  tp->{tx,}lock is not held and we are not
8528  * in an interrupt context and thus may sleep.
8529  */
8530 static void tg3_free_rings(struct tg3 *tp)
8531 {
8532 	int i, j;
8533 
8534 	for (j = 0; j < tp->irq_cnt; j++) {
8535 		struct tg3_napi *tnapi = &tp->napi[j];
8536 
8537 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8538 
8539 		if (!tnapi->tx_buffers)
8540 			continue;
8541 
8542 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8543 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8544 
8545 			if (!skb)
8546 				continue;
8547 
8548 			tg3_tx_skb_unmap(tnapi, i,
8549 					 skb_shinfo(skb)->nr_frags - 1);
8550 
8551 			dev_consume_skb_any(skb);
8552 		}
8553 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8554 	}
8555 }
8556 
8557 /* Initialize tx/rx rings for packet processing.
8558  *
8559  * The chip has been shut down and the driver detached from
8560  * the network stack, so no interrupts or new tx packets will
8561  * end up in the driver.  tp->{tx,}lock are held and thus
8562  * we may not sleep.
8563  */
8564 static int tg3_init_rings(struct tg3 *tp)
8565 {
8566 	int i;
8567 
8568 	/* Free up all the SKBs. */
8569 	tg3_free_rings(tp);
8570 
8571 	for (i = 0; i < tp->irq_cnt; i++) {
8572 		struct tg3_napi *tnapi = &tp->napi[i];
8573 
8574 		tnapi->last_tag = 0;
8575 		tnapi->last_irq_tag = 0;
8576 		tnapi->hw_status->status = 0;
8577 		tnapi->hw_status->status_tag = 0;
8578 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8579 
8580 		tnapi->tx_prod = 0;
8581 		tnapi->tx_cons = 0;
8582 		if (tnapi->tx_ring)
8583 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8584 
8585 		tnapi->rx_rcb_ptr = 0;
8586 		if (tnapi->rx_rcb)
8587 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8588 
8589 		if (tnapi->prodring.rx_std &&
8590 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8591 			tg3_free_rings(tp);
8592 			return -ENOMEM;
8593 		}
8594 	}
8595 
8596 	return 0;
8597 }
8598 
8599 static void tg3_mem_tx_release(struct tg3 *tp)
8600 {
8601 	int i;
8602 
8603 	for (i = 0; i < tp->irq_max; i++) {
8604 		struct tg3_napi *tnapi = &tp->napi[i];
8605 
8606 		if (tnapi->tx_ring) {
8607 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8608 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8609 			tnapi->tx_ring = NULL;
8610 		}
8611 
8612 		kfree(tnapi->tx_buffers);
8613 		tnapi->tx_buffers = NULL;
8614 	}
8615 }
8616 
8617 static int tg3_mem_tx_acquire(struct tg3 *tp)
8618 {
8619 	int i;
8620 	struct tg3_napi *tnapi = &tp->napi[0];
8621 
8622 	/* If multivector TSS is enabled, vector 0 does not handle
8623 	 * tx interrupts.  Don't allocate any resources for it.
8624 	 */
8625 	if (tg3_flag(tp, ENABLE_TSS))
8626 		tnapi++;
8627 
8628 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8629 		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8630 					    sizeof(struct tg3_tx_ring_info),
8631 					    GFP_KERNEL);
8632 		if (!tnapi->tx_buffers)
8633 			goto err_out;
8634 
8635 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8636 						    TG3_TX_RING_BYTES,
8637 						    &tnapi->tx_desc_mapping,
8638 						    GFP_KERNEL);
8639 		if (!tnapi->tx_ring)
8640 			goto err_out;
8641 	}
8642 
8643 	return 0;
8644 
8645 err_out:
8646 	tg3_mem_tx_release(tp);
8647 	return -ENOMEM;
8648 }
8649 
8650 static void tg3_mem_rx_release(struct tg3 *tp)
8651 {
8652 	int i;
8653 
8654 	for (i = 0; i < tp->irq_max; i++) {
8655 		struct tg3_napi *tnapi = &tp->napi[i];
8656 
8657 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8658 
8659 		if (!tnapi->rx_rcb)
8660 			continue;
8661 
8662 		dma_free_coherent(&tp->pdev->dev,
8663 				  TG3_RX_RCB_RING_BYTES(tp),
8664 				  tnapi->rx_rcb,
8665 				  tnapi->rx_rcb_mapping);
8666 		tnapi->rx_rcb = NULL;
8667 	}
8668 }
8669 
8670 static int tg3_mem_rx_acquire(struct tg3 *tp)
8671 {
8672 	unsigned int i, limit;
8673 
8674 	limit = tp->rxq_cnt;
8675 
8676 	/* If RSS is enabled, we need a (dummy) producer ring
8677 	 * set on vector zero.  This is the true hw prodring.
8678 	 */
8679 	if (tg3_flag(tp, ENABLE_RSS))
8680 		limit++;
8681 
8682 	for (i = 0; i < limit; i++) {
8683 		struct tg3_napi *tnapi = &tp->napi[i];
8684 
8685 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8686 			goto err_out;
8687 
8688 		/* If multivector RSS is enabled, vector 0
8689 		 * does not handle rx or tx interrupts.
8690 		 * Don't allocate any resources for it.
8691 		 */
8692 		if (!i && tg3_flag(tp, ENABLE_RSS))
8693 			continue;
8694 
8695 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8696 						   TG3_RX_RCB_RING_BYTES(tp),
8697 						   &tnapi->rx_rcb_mapping,
8698 						   GFP_KERNEL);
8699 		if (!tnapi->rx_rcb)
8700 			goto err_out;
8701 	}
8702 
8703 	return 0;
8704 
8705 err_out:
8706 	tg3_mem_rx_release(tp);
8707 	return -ENOMEM;
8708 }
8709 
8710 /*
8711  * Must not be invoked with interrupt sources disabled and
8712  * the hardware shutdown down.
8713  * the hardware shut down.
8714 static void tg3_free_consistent(struct tg3 *tp)
8715 {
8716 	int i;
8717 
8718 	for (i = 0; i < tp->irq_cnt; i++) {
8719 		struct tg3_napi *tnapi = &tp->napi[i];
8720 
8721 		if (tnapi->hw_status) {
8722 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8723 					  tnapi->hw_status,
8724 					  tnapi->status_mapping);
8725 			tnapi->hw_status = NULL;
8726 		}
8727 	}
8728 
8729 	tg3_mem_rx_release(tp);
8730 	tg3_mem_tx_release(tp);
8731 
8732 	/* tp->hw_stats can be referenced safely:
8733 	 *     1. under rtnl_lock
8734 	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8735 	 */
8736 	if (tp->hw_stats) {
8737 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8738 				  tp->hw_stats, tp->stats_mapping);
8739 		tp->hw_stats = NULL;
8740 	}
8741 }
8742 
8743 /*
8744  * Must not be invoked with interrupt sources disabled and
8745  * the hardware shut down.  Can sleep.
8746  */
8747 static int tg3_alloc_consistent(struct tg3 *tp)
8748 {
8749 	int i;
8750 
8751 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8752 					  sizeof(struct tg3_hw_stats),
8753 					  &tp->stats_mapping, GFP_KERNEL);
8754 	if (!tp->hw_stats)
8755 		goto err_out;
8756 
8757 	for (i = 0; i < tp->irq_cnt; i++) {
8758 		struct tg3_napi *tnapi = &tp->napi[i];
8759 		struct tg3_hw_status *sblk;
8760 
8761 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8762 						      TG3_HW_STATUS_SIZE,
8763 						      &tnapi->status_mapping,
8764 						      GFP_KERNEL);
8765 		if (!tnapi->hw_status)
8766 			goto err_out;
8767 
8768 		sblk = tnapi->hw_status;
8769 
8770 		if (tg3_flag(tp, ENABLE_RSS)) {
8771 			u16 *prodptr = NULL;
8772 
8773 			/*
8774 			 * When RSS is enabled, the status block format changes
8775 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8776 			 * and "rx_mini_consumer" members get mapped to the
8777 			 * other three rx return ring producer indexes.
8778 			 */
8779 			switch (i) {
8780 			case 1:
8781 				prodptr = &sblk->idx[0].rx_producer;
8782 				break;
8783 			case 2:
8784 				prodptr = &sblk->rx_jumbo_consumer;
8785 				break;
8786 			case 3:
8787 				prodptr = &sblk->reserved;
8788 				break;
8789 			case 4:
8790 				prodptr = &sblk->rx_mini_consumer;
8791 				break;
8792 			}
8793 			tnapi->rx_rcb_prod_idx = prodptr;
8794 		} else {
8795 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8796 		}
8797 	}
8798 
8799 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8800 		goto err_out;
8801 
8802 	return 0;
8803 
8804 err_out:
8805 	tg3_free_consistent(tp);
8806 	return -ENOMEM;
8807 }
8808 
8809 #define MAX_WAIT_CNT 1000
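/* At udelay(100) per poll, MAX_WAIT_CNT bounds each wait below to ~100 ms. */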
8810 
8811 /* To stop a block, clear the enable bit and poll till it
8812  * clears.  tp->lock is held.
8813  */
8814 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8815 {
8816 	unsigned int i;
8817 	u32 val;
8818 
8819 	if (tg3_flag(tp, 5705_PLUS)) {
8820 		switch (ofs) {
8821 		case RCVLSC_MODE:
8822 		case DMAC_MODE:
8823 		case MBFREE_MODE:
8824 		case BUFMGR_MODE:
8825 		case MEMARB_MODE:
8826 			/* We can't enable/disable these bits of the
8827 			 * 5705/5750; just say success.
8828 			 */
8829 			return 0;
8830 
8831 		default:
8832 			break;
8833 		}
8834 	}
8835 
8836 	val = tr32(ofs);
8837 	val &= ~enable_bit;
8838 	tw32_f(ofs, val);
8839 
8840 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8841 		if (pci_channel_offline(tp->pdev)) {
8842 			dev_err(&tp->pdev->dev,
8843 				"tg3_stop_block device offline, "
8844 				"ofs=%lx enable_bit=%x\n",
8845 				ofs, enable_bit);
8846 			return -ENODEV;
8847 		}
8848 
8849 		udelay(100);
8850 		val = tr32(ofs);
8851 		if ((val & enable_bit) == 0)
8852 			break;
8853 	}
8854 
8855 	if (i == MAX_WAIT_CNT && !silent) {
8856 		dev_err(&tp->pdev->dev,
8857 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8858 			ofs, enable_bit);
8859 		return -ENODEV;
8860 	}
8861 
8862 	return 0;
8863 }
8864 
8865 /* tp->lock is held. */
8866 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8867 {
8868 	int i, err;
8869 
8870 	tg3_disable_ints(tp);
8871 
8872 	if (pci_channel_offline(tp->pdev)) {
8873 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8874 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8875 		err = -ENODEV;
8876 		goto err_no_dev;
8877 	}
8878 
8879 	tp->rx_mode &= ~RX_MODE_ENABLE;
8880 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8881 	udelay(10);
8882 
8883 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8884 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8885 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8886 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8887 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8888 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8889 
8890 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8891 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8892 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8893 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8894 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8895 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8896 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8897 
8898 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8899 	tw32_f(MAC_MODE, tp->mac_mode);
8900 	udelay(40);
8901 
8902 	tp->tx_mode &= ~TX_MODE_ENABLE;
8903 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8904 
8905 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8906 		udelay(100);
8907 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8908 			break;
8909 	}
8910 	if (i >= MAX_WAIT_CNT) {
8911 		dev_err(&tp->pdev->dev,
8912 			"%s timed out, TX_MODE_ENABLE will not clear "
8913 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8914 		err |= -ENODEV;
8915 	}
8916 
8917 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8918 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8919 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8920 
8921 	tw32(FTQ_RESET, 0xffffffff);
8922 	tw32(FTQ_RESET, 0x00000000);
8923 
8924 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8925 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8926 
8927 err_no_dev:
8928 	for (i = 0; i < tp->irq_cnt; i++) {
8929 		struct tg3_napi *tnapi = &tp->napi[i];
8930 		if (tnapi->hw_status)
8931 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8932 	}
8933 
8934 	return err;
8935 }
8936 
8937 /* Save PCI command register before chip reset */
8938 static void tg3_save_pci_state(struct tg3 *tp)
8939 {
8940 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8941 }
8942 
8943 /* Restore PCI state after chip reset */
8944 static void tg3_restore_pci_state(struct tg3 *tp)
8945 {
8946 	u32 val;
8947 
8948 	/* Re-enable indirect register accesses. */
8949 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8950 			       tp->misc_host_ctrl);
8951 
8952 	/* Set MAX PCI retry to zero. */
8953 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8954 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8955 	    tg3_flag(tp, PCIX_MODE))
8956 		val |= PCISTATE_RETRY_SAME_DMA;
8957 	/* Allow reads and writes to the APE register and memory space. */
8958 	if (tg3_flag(tp, ENABLE_APE))
8959 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8960 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8961 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8962 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8963 
8964 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8965 
8966 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8967 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8968 				      tp->pci_cacheline_sz);
8969 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8970 				      tp->pci_lat_timer);
8971 	}
8972 
8973 	/* Make sure PCI-X relaxed ordering bit is clear. */
8974 	if (tg3_flag(tp, PCIX_MODE)) {
8975 		u16 pcix_cmd;
8976 
8977 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8978 				     &pcix_cmd);
8979 		pcix_cmd &= ~PCI_X_CMD_ERO;
8980 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8981 				      pcix_cmd);
8982 	}
8983 
8984 	if (tg3_flag(tp, 5780_CLASS)) {
8985 
8986 		/* Chip reset on 5780 will reset the MSI enable bit,
8987 		 * so we need to restore it.
8988 		 */
8989 		if (tg3_flag(tp, USING_MSI)) {
8990 			u16 ctrl;
8991 
8992 			pci_read_config_word(tp->pdev,
8993 					     tp->msi_cap + PCI_MSI_FLAGS,
8994 					     &ctrl);
8995 			pci_write_config_word(tp->pdev,
8996 					      tp->msi_cap + PCI_MSI_FLAGS,
8997 					      ctrl | PCI_MSI_FLAGS_ENABLE);
8998 			val = tr32(MSGINT_MODE);
8999 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9000 		}
9001 	}
9002 }
9003 
9004 static void tg3_override_clk(struct tg3 *tp)
9005 {
9006 	u32 val;
9007 
9008 	switch (tg3_asic_rev(tp)) {
9009 	case ASIC_REV_5717:
9010 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9011 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9012 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9013 		break;
9014 
9015 	case ASIC_REV_5719:
9016 	case ASIC_REV_5720:
9017 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9018 		break;
9019 
9020 	default:
9021 		return;
9022 	}
9023 }
9024 
9025 static void tg3_restore_clk(struct tg3 *tp)
9026 {
9027 	u32 val;
9028 
9029 	switch (tg3_asic_rev(tp)) {
9030 	case ASIC_REV_5717:
9031 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9032 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9033 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9034 		break;
9035 
9036 	case ASIC_REV_5719:
9037 	case ASIC_REV_5720:
9038 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9039 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9040 		break;
9041 
9042 	default:
9043 		return;
9044 	}
9045 }
9046 
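/* tg3_override_clk() and tg3_restore_clk() bracket the core-clock reset in
 * tg3_chip_reset() below: the MAC clock override is raised so bootcode can
 * finish promptly, then dropped once the chip is back up.
 */
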
9047 /* tp->lock is held. */
9048 static int tg3_chip_reset(struct tg3 *tp)
9049 	__releases(tp->lock)
9050 	__acquires(tp->lock)
9051 {
9052 	u32 val;
9053 	void (*write_op)(struct tg3 *, u32, u32);
9054 	int i, err;
9055 
9056 	if (!pci_device_is_present(tp->pdev))
9057 		return -ENODEV;
9058 
9059 	tg3_nvram_lock(tp);
9060 
9061 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9062 
9063 	/* No matching tg3_nvram_unlock() after this because
9064 	 * chip reset below will undo the nvram lock.
9065 	 */
9066 	tp->nvram_lock_cnt = 0;
9067 
9068 	/* GRC_MISC_CFG core clock reset will clear the memory
9069 	 * enable bit in PCI register 4 and the MSI enable bit
9070 	 * on some chips, so we save relevant registers here.
9071 	 */
9072 	tg3_save_pci_state(tp);
9073 
9074 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9075 	    tg3_flag(tp, 5755_PLUS))
9076 		tw32(GRC_FASTBOOT_PC, 0);
9077 
9078 	/*
9079 	 * We must avoid the readl() that normally takes place.
9080 	 * It locks machines, causes machine checks, and other
9081 	 * fun things.  So, temporarily disable the 5701
9082 	 * hardware workaround, while we do the reset.
9083 	 */
9084 	write_op = tp->write32;
9085 	if (write_op == tg3_write_flush_reg32)
9086 		tp->write32 = tg3_write32;
9087 
9088 	/* Prevent the irq handler from reading or writing PCI registers
9089 	 * during chip reset when the memory enable bit in the PCI command
9090 	 * register may be cleared.  The chip does not generate interrupts
9091 	 * at this time, but the irq handler may still be called due to irq
9092 	 * sharing or irqpoll.
9093 	 */
9094 	tg3_flag_set(tp, CHIP_RESETTING);
9095 	for (i = 0; i < tp->irq_cnt; i++) {
9096 		struct tg3_napi *tnapi = &tp->napi[i];
9097 		if (tnapi->hw_status) {
9098 			tnapi->hw_status->status = 0;
9099 			tnapi->hw_status->status_tag = 0;
9100 		}
9101 		tnapi->last_tag = 0;
9102 		tnapi->last_irq_tag = 0;
9103 	}
9104 	smp_mb();
9105 
9106 	tg3_full_unlock(tp);
9107 
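	/* synchronize_irq() can sleep, so tp->lock is dropped while waiting
	 * for any in-flight interrupt handlers to drain.
	 */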
9108 	for (i = 0; i < tp->irq_cnt; i++)
9109 		synchronize_irq(tp->napi[i].irq_vec);
9110 
9111 	tg3_full_lock(tp, 0);
9112 
9113 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9114 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9115 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9116 	}
9117 
9118 	/* do the reset */
9119 	val = GRC_MISC_CFG_CORECLK_RESET;
9120 
9121 	if (tg3_flag(tp, PCI_EXPRESS)) {
9122 		/* Force PCIe 1.0a mode */
9123 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9124 		    !tg3_flag(tp, 57765_PLUS) &&
9125 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9126 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9127 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9128 
9129 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9130 			tw32(GRC_MISC_CFG, (1 << 29));
9131 			val |= (1 << 29);
9132 		}
9133 	}
9134 
9135 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9136 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9137 		tw32(GRC_VCPU_EXT_CTRL,
9138 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9139 	}
9140 
9141 	/* Set the clock to the highest frequency to avoid timeouts. With link
9142 	 * aware mode, the clock speed could be slow and bootcode does not
9143 	 * complete within the expected time. Override the clock to allow the
9144 	 * bootcode to finish sooner and then restore it.
9145 	 */
9146 	tg3_override_clk(tp);
9147 
9148 	/* Manage gphy power for all CPMU-absent PCIe devices. */
9149 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9150 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9151 
9152 	tw32(GRC_MISC_CFG, val);
9153 
9154 	/* restore 5701 hardware bug workaround write method */
9155 	tp->write32 = write_op;
9156 
9157 	/* Unfortunately, we have to delay before the PCI read back.
9158 	 * Some 575X chips will not even respond to a PCI cfg access
9159 	 * when the reset command is given to the chip.
9160 	 *
9161 	 * How do these hardware designers expect things to work
9162 	 * properly if the PCI write is posted for a long period
9163 	 * of time?  It is always necessary to have some method by
9164 	 * which a register read back can occur to push out the
9165 	 * write that does the reset.
9166 	 *
9167 	 * For most tg3 variants the trick below has worked.
9168 	 * Ho hum...
9169 	 */
9170 	udelay(120);
9171 
9172 	/* Flush PCI posted writes.  The normal MMIO registers
9173 	 * are inaccessible at this time so this is the only
9174 	 * way to do this reliably (actually, this is no longer
9175 	 * the case, see above).  I tried to use indirect
9176 	 * register read/write but this upset some 5701 variants.
9177 	 */
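	/* For reference, the usual MMIO posted-write flush idiom is a
	 * sketch like the following (illustrative only; unusable here
	 * while the core is in reset):
	 *
	 *	tw32(reg, val);
	 *	(void) tr32(reg);	// read back forces the write out
	 */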
9178 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9179 
9180 	udelay(120);
9181 
9182 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9183 		u16 val16;
9184 
9185 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9186 			int j;
9187 			u32 cfg_val;
9188 
9189 			/* Wait for link training to complete.  */
9190 			for (j = 0; j < 5000; j++)
9191 				udelay(100);
9192 
9193 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9194 			pci_write_config_dword(tp->pdev, 0xc4,
9195 					       cfg_val | (1 << 15));
9196 		}
9197 
9198 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9199 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9200 		/*
9201 		 * Older PCIe devices only support the 128 byte
9202 		 * MPS setting.  Enforce the restriction.
9203 		 */
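		/* Clearing the 3-bit payload field via
		 * pcie_capability_clear_word() below leaves the 000b
		 * encoding, i.e. a 128-byte MPS.
		 */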
9204 		if (!tg3_flag(tp, CPMU_PRESENT))
9205 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9206 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9207 
9208 		/* Clear error status */
9209 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9210 				      PCI_EXP_DEVSTA_CED |
9211 				      PCI_EXP_DEVSTA_NFED |
9212 				      PCI_EXP_DEVSTA_FED |
9213 				      PCI_EXP_DEVSTA_URD);
9214 	}
9215 
9216 	tg3_restore_pci_state(tp);
9217 
9218 	tg3_flag_clear(tp, CHIP_RESETTING);
9219 	tg3_flag_clear(tp, ERROR_PROCESSED);
9220 
9221 	val = 0;
9222 	if (tg3_flag(tp, 5780_CLASS))
9223 		val = tr32(MEMARB_MODE);
9224 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9225 
9226 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9227 		tg3_stop_fw(tp);
9228 		tw32(0x5000, 0x400);
9229 	}
9230 
9231 	if (tg3_flag(tp, IS_SSB_CORE)) {
9232 		/*
9233 		 * BCM4785: In order to avoid repercussions from using
9234 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9235 		 * which is not required for normal operation.
9236 		 */
9237 		tg3_stop_fw(tp);
9238 		tg3_halt_cpu(tp, RX_CPU_BASE);
9239 	}
9240 
9241 	err = tg3_poll_fw(tp);
9242 	if (err)
9243 		return err;
9244 
9245 	tw32(GRC_MODE, tp->grc_mode);
9246 
9247 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9248 		val = tr32(0xc4);
9249 
9250 		tw32(0xc4, val | (1 << 15));
9251 	}
9252 
9253 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9254 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9255 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9256 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9257 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9258 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9259 	}
9260 
9261 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9262 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9263 		val = tp->mac_mode;
9264 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9265 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9266 		val = tp->mac_mode;
9267 	} else
9268 		val = 0;
9269 
9270 	tw32_f(MAC_MODE, val);
9271 	udelay(40);
9272 
9273 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9274 
9275 	tg3_mdio_start(tp);
9276 
9277 	if (tg3_flag(tp, PCI_EXPRESS) &&
9278 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9279 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9280 	    !tg3_flag(tp, 57765_PLUS)) {
9281 		val = tr32(0x7c00);
9282 
9283 		tw32(0x7c00, val | (1 << 25));
9284 	}
9285 
9286 	tg3_restore_clk(tp);
9287 
9288 	/* Increase the core clock speed to fix tx timeout issue for 5762
9289 	 * with 100Mbps link speed.
9290 	 */
9291 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9292 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9293 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9294 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9295 	}
9296 
9297 	/* Reprobe ASF enable state.  */
9298 	tg3_flag_clear(tp, ENABLE_ASF);
9299 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9300 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9301 
9302 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9303 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9304 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9305 		u32 nic_cfg;
9306 
9307 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9308 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9309 			tg3_flag_set(tp, ENABLE_ASF);
9310 			tp->last_event_jiffies = jiffies;
9311 			if (tg3_flag(tp, 5750_PLUS))
9312 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9313 
9314 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9315 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9316 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9317 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9318 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9319 		}
9320 	}
9321 
9322 	return 0;
9323 }
9324 
9325 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9326 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9327 static void __tg3_set_rx_mode(struct net_device *);
9328 
9329 /* tp->lock is held. */
9330 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9331 {
9332 	int err;
9333 
9334 	tg3_stop_fw(tp);
9335 
9336 	tg3_write_sig_pre_reset(tp, kind);
9337 
9338 	tg3_abort_hw(tp, silent);
9339 	err = tg3_chip_reset(tp);
9340 
9341 	__tg3_set_mac_addr(tp, false);
9342 
9343 	tg3_write_sig_legacy(tp, kind);
9344 	tg3_write_sig_post_reset(tp, kind);
9345 
9346 	if (tp->hw_stats) {
9347 		/* Save the stats across chip resets... */
9348 		tg3_get_nstats(tp, &tp->net_stats_prev);
9349 		tg3_get_estats(tp, &tp->estats_prev);
9350 
9351 		/* And make sure the next sample is new data */
9352 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9353 	}
9354 
9355 	return err;
9356 }
9357 
9358 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9359 {
9360 	struct tg3 *tp = netdev_priv(dev);
9361 	struct sockaddr *addr = p;
9362 	int err = 0;
9363 	bool skip_mac_1 = false;
9364 
9365 	if (!is_valid_ether_addr(addr->sa_data))
9366 		return -EADDRNOTAVAIL;
9367 
9368 	eth_hw_addr_set(dev, addr->sa_data);
9369 
9370 	if (!netif_running(dev))
9371 		return 0;
9372 
9373 	if (tg3_flag(tp, ENABLE_ASF)) {
9374 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9375 
9376 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9377 		addr0_low = tr32(MAC_ADDR_0_LOW);
9378 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9379 		addr1_low = tr32(MAC_ADDR_1_LOW);
9380 
9381 		/* Skip MAC addr 1 if ASF is using it. */
9382 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9383 		    !(addr1_high == 0 && addr1_low == 0))
9384 			skip_mac_1 = true;
9385 	}
9386 	spin_lock_bh(&tp->lock);
9387 	__tg3_set_mac_addr(tp, skip_mac_1);
9388 	__tg3_set_rx_mode(dev);
9389 	spin_unlock_bh(&tp->lock);
9390 
9391 	return err;
9392 }
9393 
9394 /* tp->lock is held. */
9395 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9396 			   dma_addr_t mapping, u32 maxlen_flags,
9397 			   u32 nic_addr)
9398 {
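	/* NIC SRAM BDINFO slots take the host ring DMA address as two
	 * 32-bit halves, then the length/flags word and (pre-5705) the
	 * NIC-side ring address.
	 */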
9399 	tg3_write_mem(tp,
9400 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9401 		      ((u64) mapping >> 32));
9402 	tg3_write_mem(tp,
9403 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9404 		      ((u64) mapping & 0xffffffff));
9405 	tg3_write_mem(tp,
9406 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9407 		       maxlen_flags);
9408 
9409 	if (!tg3_flag(tp, 5705_PLUS))
9410 		tg3_write_mem(tp,
9411 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9412 			      nic_addr);
9413 }
9414 
9415 
9416 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9417 {
9418 	int i = 0;
9419 
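	/* Each TX coalescing vector has its own register block; the
	 * per-vector _VEC1 offsets below are spaced 0x18 bytes apart.
	 */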
9420 	if (!tg3_flag(tp, ENABLE_TSS)) {
9421 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9422 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9423 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9424 	} else {
9425 		tw32(HOSTCC_TXCOL_TICKS, 0);
9426 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9427 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9428 
9429 		for (; i < tp->txq_cnt; i++) {
9430 			u32 reg;
9431 
9432 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9433 			tw32(reg, ec->tx_coalesce_usecs);
9434 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9435 			tw32(reg, ec->tx_max_coalesced_frames);
9436 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9437 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9438 		}
9439 	}
9440 
9441 	for (; i < tp->irq_max - 1; i++) {
9442 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9443 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9444 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9445 	}
9446 }
9447 
9448 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9449 {
9450 	int i = 0;
9451 	u32 limit = tp->rxq_cnt;
9452 
9453 	if (!tg3_flag(tp, ENABLE_RSS)) {
9454 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9455 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9456 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9457 		limit--;
9458 	} else {
9459 		tw32(HOSTCC_RXCOL_TICKS, 0);
9460 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9461 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9462 	}
9463 
9464 	for (; i < limit; i++) {
9465 		u32 reg;
9466 
9467 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9468 		tw32(reg, ec->rx_coalesce_usecs);
9469 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9470 		tw32(reg, ec->rx_max_coalesced_frames);
9471 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9472 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9473 	}
9474 
9475 	for (; i < tp->irq_max - 1; i++) {
9476 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9477 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9478 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9479 	}
9480 }
9481 
9482 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9483 {
9484 	tg3_coal_tx_init(tp, ec);
9485 	tg3_coal_rx_init(tp, ec);
9486 
9487 	if (!tg3_flag(tp, 5705_PLUS)) {
9488 		u32 val = ec->stats_block_coalesce_usecs;
9489 
9490 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9491 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9492 
9493 		if (!tp->link_up)
9494 			val = 0;
9495 
9496 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9497 	}
9498 }
9499 
9500 /* tp->lock is held. */
9501 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9502 {
9503 	u32 txrcb, limit;
9504 
9505 	/* Disable all transmit rings but the first. */
9506 	if (!tg3_flag(tp, 5705_PLUS))
9507 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9508 	else if (tg3_flag(tp, 5717_PLUS))
9509 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9510 	else if (tg3_flag(tp, 57765_CLASS) ||
9511 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9512 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9513 	else
9514 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9515 
9516 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9517 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9518 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9519 			      BDINFO_FLAGS_DISABLED);
9520 }
9521 
9522 /* tp->lock is held. */
9523 static void tg3_tx_rcbs_init(struct tg3 *tp)
9524 {
9525 	int i = 0;
9526 	u32 txrcb = NIC_SRAM_SEND_RCB;
9527 
9528 	if (tg3_flag(tp, ENABLE_TSS))
9529 		i++;
9530 
9531 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9532 		struct tg3_napi *tnapi = &tp->napi[i];
9533 
9534 		if (!tnapi->tx_ring)
9535 			continue;
9536 
9537 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9538 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9539 			       NIC_SRAM_TX_BUFFER_DESC);
9540 	}
9541 }
9542 
9543 /* tp->lock is held. */
9544 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9545 {
9546 	u32 rxrcb, limit;
9547 
9548 	/* Disable all receive return rings but the first. */
9549 	if (tg3_flag(tp, 5717_PLUS))
9550 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9551 	else if (!tg3_flag(tp, 5705_PLUS))
9552 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9553 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9554 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9555 		 tg3_flag(tp, 57765_CLASS))
9556 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9557 	else
9558 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9559 
9560 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9561 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9562 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9563 			      BDINFO_FLAGS_DISABLED);
9564 }
9565 
9566 /* tp->lock is held. */
9567 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9568 {
9569 	int i = 0;
9570 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9571 
9572 	if (tg3_flag(tp, ENABLE_RSS))
9573 		i++;
9574 
9575 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9576 		struct tg3_napi *tnapi = &tp->napi[i];
9577 
9578 		if (!tnapi->rx_rcb)
9579 			continue;
9580 
9581 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9582 			       (tp->rx_ret_ring_mask + 1) <<
9583 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9584 	}
9585 }
9586 
9587 /* tp->lock is held. */
9588 static void tg3_rings_reset(struct tg3 *tp)
9589 {
9590 	int i;
9591 	u32 stblk;
9592 	struct tg3_napi *tnapi = &tp->napi[0];
9593 
9594 	tg3_tx_rcbs_disable(tp);
9595 
9596 	tg3_rx_ret_rcbs_disable(tp);
9597 
9598 	/* Disable interrupts */
9599 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9600 	tp->napi[0].chk_msi_cnt = 0;
9601 	tp->napi[0].last_rx_cons = 0;
9602 	tp->napi[0].last_tx_cons = 0;
9603 
9604 	/* Zero mailbox registers. */
9605 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9606 		for (i = 1; i < tp->irq_max; i++) {
9607 			tp->napi[i].tx_prod = 0;
9608 			tp->napi[i].tx_cons = 0;
9609 			if (tg3_flag(tp, ENABLE_TSS))
9610 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9611 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9612 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9613 			tp->napi[i].chk_msi_cnt = 0;
9614 			tp->napi[i].last_rx_cons = 0;
9615 			tp->napi[i].last_tx_cons = 0;
9616 		}
9617 		if (!tg3_flag(tp, ENABLE_TSS))
9618 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9619 	} else {
9620 		tp->napi[0].tx_prod = 0;
9621 		tp->napi[0].tx_cons = 0;
9622 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9623 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9624 	}
9625 
9626 	/* Make sure the NIC-based send BD rings are disabled. */
9627 	if (!tg3_flag(tp, 5705_PLUS)) {
9628 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9629 		for (i = 0; i < 16; i++)
9630 			tw32_tx_mbox(mbox + i * 8, 0);
9631 	}
9632 
9633 	/* Clear status block in ram. */
9634 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9635 
9636 	/* Set status block DMA address */
9637 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9638 	     ((u64) tnapi->status_mapping >> 32));
9639 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9640 	     ((u64) tnapi->status_mapping & 0xffffffff));
9641 
9642 	stblk = HOSTCC_STATBLCK_RING1;
9643 
9644 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9645 		u64 mapping = (u64)tnapi->status_mapping;
9646 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9647 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9648 		stblk += 8;
9649 
9650 		/* Clear status block in ram. */
9651 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9652 	}
9653 
9654 	tg3_tx_rcbs_init(tp);
9655 	tg3_rx_ret_rcbs_init(tp);
9656 }
9657 
9658 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9659 {
9660 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9661 
9662 	if (!tg3_flag(tp, 5750_PLUS) ||
9663 	    tg3_flag(tp, 5780_CLASS) ||
9664 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9665 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9666 	    tg3_flag(tp, 57765_PLUS))
9667 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9668 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9669 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9670 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9671 	else
9672 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9673 
9674 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9675 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9676 
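	/* The smaller threshold wins; e.g. assuming a 128-entry BD cache
	 * and rx_pending of 200, this is min(min(64, rx_std_max_post), 25).
	 */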
9677 	val = min(nic_rep_thresh, host_rep_thresh);
9678 	tw32(RCVBDI_STD_THRESH, val);
9679 
9680 	if (tg3_flag(tp, 57765_PLUS))
9681 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9682 
9683 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9684 		return;
9685 
9686 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9687 
9688 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9689 
9690 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9691 	tw32(RCVBDI_JUMBO_THRESH, val);
9692 
9693 	if (tg3_flag(tp, 57765_PLUS))
9694 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9695 }
9696 
9697 static inline u32 calc_crc(unsigned char *buf, int len)
9698 {
9699 	u32 reg;
9700 	u32 tmp;
9701 	int j, k;
9702 
9703 	reg = 0xffffffff;
9704 
9705 	for (j = 0; j < len; j++) {
9706 		reg ^= buf[j];
9707 
9708 		for (k = 0; k < 8; k++) {
9709 			tmp = reg & 0x01;
9710 
9711 			reg >>= 1;
9712 
9713 			if (tmp)
9714 				reg ^= CRC32_POLY_LE;
9715 		}
9716 	}
9717 
9718 	return ~reg;
9719 }
9720 
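/* A minimal illustrative sketch (hypothetical helper, not used by the
 * driver): __tg3_set_rx_mode() below reduces calc_crc() of a station
 * address to the 7-bit multicast hash filter index like so.
 */
static inline u32 tg3_mc_hash_bit(unsigned char *mc_addr)
{
	/* low 7 bits of the inverted little-endian CRC-32 */
	return ~calc_crc(mc_addr, ETH_ALEN) & 0x7f;
}
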
9721 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9722 {
9723 	/* accept or reject all multicast frames */
9724 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9725 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9726 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9727 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9728 }
9729 
9730 static void __tg3_set_rx_mode(struct net_device *dev)
9731 {
9732 	struct tg3 *tp = netdev_priv(dev);
9733 	u32 rx_mode;
9734 
9735 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9736 				  RX_MODE_KEEP_VLAN_TAG);
9737 
9738 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9739 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9740 	 * flag clear.
9741 	 */
9742 	if (!tg3_flag(tp, ENABLE_ASF))
9743 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9744 #endif
9745 
9746 	if (dev->flags & IFF_PROMISC) {
9747 		/* Promiscuous mode. */
9748 		rx_mode |= RX_MODE_PROMISC;
9749 	} else if (dev->flags & IFF_ALLMULTI) {
9750 		/* Accept all multicast. */
9751 		tg3_set_multi(tp, 1);
9752 	} else if (netdev_mc_empty(dev)) {
9753 		/* Reject all multicast. */
9754 		tg3_set_multi(tp, 0);
9755 	} else {
9756 		/* Accept one or more multicast(s). */
9757 		struct netdev_hw_addr *ha;
9758 		u32 mc_filter[4] = { 0, };
9759 		u32 regidx;
9760 		u32 bit;
9761 		u32 crc;
9762 
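		/* bits 6:5 of the 7-bit hash select one of the four 32-bit
		 * MAC_HASH registers; bits 4:0 select the bit within it.
		 */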
9763 		netdev_for_each_mc_addr(ha, dev) {
9764 			crc = calc_crc(ha->addr, ETH_ALEN);
9765 			bit = ~crc & 0x7f;
9766 			regidx = (bit & 0x60) >> 5;
9767 			bit &= 0x1f;
9768 			mc_filter[regidx] |= (1 << bit);
9769 		}
9770 
9771 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9772 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9773 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9774 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9775 	}
9776 
9777 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9778 		rx_mode |= RX_MODE_PROMISC;
9779 	} else if (!(dev->flags & IFF_PROMISC)) {
9780 		/* Add all entries to the MAC addr filter list */
9781 		int i = 0;
9782 		struct netdev_hw_addr *ha;
9783 
9784 		netdev_for_each_uc_addr(ha, dev) {
9785 			__tg3_set_one_mac_addr(tp, ha->addr,
9786 					       i + TG3_UCAST_ADDR_IDX(tp));
9787 			i++;
9788 		}
9789 	}
9790 
9791 	if (rx_mode != tp->rx_mode) {
9792 		tp->rx_mode = rx_mode;
9793 		tw32_f(MAC_RX_MODE, rx_mode);
9794 		udelay(10);
9795 	}
9796 }
9797 
9798 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9799 {
9800 	int i;
9801 
9802 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9803 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9804 }
9805 
9806 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9807 {
9808 	int i;
9809 
9810 	if (!tg3_flag(tp, SUPPORT_MSIX))
9811 		return;
9812 
9813 	if (tp->rxq_cnt == 1) {
9814 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9815 		return;
9816 	}
9817 
9818 	/* Validate table against current IRQ count */
9819 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9820 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9821 			break;
9822 	}
9823 
9824 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9825 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9826 }
9827 
9828 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9829 {
9830 	int i = 0;
9831 	u32 reg = MAC_RSS_INDIR_TBL_0;
9832 
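	/* Eight 4-bit queue indices pack into each 32-bit register, first
	 * entry in the top nibble; e.g. a table slice of {1,2,3,0,1,2,3,0}
	 * is written as 0x12301230.
	 */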
9833 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9834 		u32 val = tp->rss_ind_tbl[i];
9835 		i++;
9836 		for (; i % 8; i++) {
9837 			val <<= 4;
9838 			val |= tp->rss_ind_tbl[i];
9839 		}
9840 		tw32(reg, val);
9841 		reg += 4;
9842 	}
9843 }
9844 
9845 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9846 {
9847 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9848 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9849 	else
9850 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9851 }
9852 
9853 /* tp->lock is held. */
9854 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9855 {
9856 	u32 val, rdmac_mode;
9857 	int i, err, limit;
9858 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9859 
9860 	tg3_disable_ints(tp);
9861 
9862 	tg3_stop_fw(tp);
9863 
9864 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9865 
9866 	if (tg3_flag(tp, INIT_COMPLETE))
9867 		tg3_abort_hw(tp, true);
9868 
9869 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9870 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9871 		tg3_phy_pull_config(tp);
9872 		tg3_eee_pull_config(tp, NULL);
9873 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9874 	}
9875 
9876 	/* Enable MAC control of LPI */
9877 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9878 		tg3_setup_eee(tp);
9879 
9880 	if (reset_phy)
9881 		tg3_phy_reset(tp);
9882 
9883 	err = tg3_chip_reset(tp);
9884 	if (err)
9885 		return err;
9886 
9887 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9888 
9889 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9890 		val = tr32(TG3_CPMU_CTRL);
9891 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9892 		tw32(TG3_CPMU_CTRL, val);
9893 
9894 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9895 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9896 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9897 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9898 
9899 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9900 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9901 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9902 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9903 
9904 		val = tr32(TG3_CPMU_HST_ACC);
9905 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9906 		val |= CPMU_HST_ACC_MACCLK_6_25;
9907 		tw32(TG3_CPMU_HST_ACC, val);
9908 	}
9909 
9910 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9911 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9912 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9913 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9914 		tw32(PCIE_PWR_MGMT_THRESH, val);
9915 
9916 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9917 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9918 
9919 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9920 
9921 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9922 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9923 	}
9924 
9925 	if (tg3_flag(tp, L1PLLPD_EN)) {
9926 		u32 grc_mode = tr32(GRC_MODE);
9927 
9928 		/* Access the lower 1K of PL PCIE block registers. */
9929 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9930 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9931 
9932 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9933 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9934 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9935 
9936 		tw32(GRC_MODE, grc_mode);
9937 	}
9938 
9939 	if (tg3_flag(tp, 57765_CLASS)) {
9940 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9941 			u32 grc_mode = tr32(GRC_MODE);
9942 
9943 			/* Access the lower 1K of PL PCIE block registers. */
9944 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9945 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9946 
9947 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9948 				   TG3_PCIE_PL_LO_PHYCTL5);
9949 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9950 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9951 
9952 			tw32(GRC_MODE, grc_mode);
9953 		}
9954 
9955 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9956 			u32 grc_mode;
9957 
9958 			/* Fix transmit hangs */
9959 			val = tr32(TG3_CPMU_PADRNG_CTL);
9960 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9961 			tw32(TG3_CPMU_PADRNG_CTL, val);
9962 
9963 			grc_mode = tr32(GRC_MODE);
9964 
9965 			/* Access the lower 1K of DL PCIE block registers. */
9966 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9967 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9968 
9969 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9970 				   TG3_PCIE_DL_LO_FTSMAX);
9971 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9972 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9973 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9974 
9975 			tw32(GRC_MODE, grc_mode);
9976 		}
9977 
9978 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9979 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9980 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9981 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9982 	}
9983 
9984 	/* This works around an issue with Athlon chipsets on
9985 	 * B3 tigon3 silicon.  This bit has no effect on any
9986 	 * other revision.  But do not set this on PCI Express
9987 	 * chips and don't even touch the clocks if the CPMU is present.
9988 	 */
9989 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9990 		if (!tg3_flag(tp, PCI_EXPRESS))
9991 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9992 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9993 	}
9994 
9995 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9996 	    tg3_flag(tp, PCIX_MODE)) {
9997 		val = tr32(TG3PCI_PCISTATE);
9998 		val |= PCISTATE_RETRY_SAME_DMA;
9999 		tw32(TG3PCI_PCISTATE, val);
10000 	}
10001 
10002 	if (tg3_flag(tp, ENABLE_APE)) {
10003 		/* Allow reads and writes to the
10004 		 * APE register and memory space.
10005 		 */
10006 		val = tr32(TG3PCI_PCISTATE);
10007 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10008 		       PCISTATE_ALLOW_APE_SHMEM_WR |
10009 		       PCISTATE_ALLOW_APE_PSPACE_WR;
10010 		tw32(TG3PCI_PCISTATE, val);
10011 	}
10012 
10013 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10014 		/* Enable some hw fixes.  */
10015 		val = tr32(TG3PCI_MSI_DATA);
10016 		val |= (1 << 26) | (1 << 28) | (1 << 29);
10017 		tw32(TG3PCI_MSI_DATA, val);
10018 	}
10019 
10020 	/* Descriptor ring init may make accesses to the
10021 	 * NIC SRAM area to setup the TX descriptors, so we
10022 	 * can only do this after the hardware has been
10023 	 * successfully reset.
10024 	 */
10025 	err = tg3_init_rings(tp);
10026 	if (err)
10027 		return err;
10028 
10029 	if (tg3_flag(tp, 57765_PLUS)) {
10030 		val = tr32(TG3PCI_DMA_RW_CTRL) &
10031 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10032 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10033 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10034 		if (!tg3_flag(tp, 57765_CLASS) &&
10035 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10036 		    tg3_asic_rev(tp) != ASIC_REV_5762)
10037 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10038 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10039 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10040 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
10041 		/* This value is determined during the probe-time DMA
10042 		 * engine test, tg3_test_dma.
10043 		 */
10044 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10045 	}
10046 
10047 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10048 			  GRC_MODE_4X_NIC_SEND_RINGS |
10049 			  GRC_MODE_NO_TX_PHDR_CSUM |
10050 			  GRC_MODE_NO_RX_PHDR_CSUM);
10051 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10052 
10053 	/* Pseudo-header checksum is done by hardware logic and not
10054 	 * the offload processors, so make the chip do the pseudo-
10055 	 * header checksums on receive.  For transmit it is more
10056 	 * convenient to do the pseudo-header checksum in software
10057 	 * as Linux does that on transmit for us in all cases.
10058 	 */
10059 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10060 
10061 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10062 	if (tp->rxptpctl)
10063 		tw32(TG3_RX_PTP_CTL,
10064 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10065 
10066 	if (tg3_flag(tp, PTP_CAPABLE))
10067 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10068 
10069 	tw32(GRC_MODE, tp->grc_mode | val);
10070 
10071 	/* On one of the AMD platforms, MRRS is restricted to 4000 because of
10072 	 * a south bridge limitation. As a workaround, the driver sets MRRS
10073 	 * to 2048 instead of the default 4096.
10074 	 */
10075 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10076 	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10077 		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10078 		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10079 	}
10080 
10081 	/* Set up the timer prescaler register.  Clock is always 66 MHz. */
10082 	val = tr32(GRC_MISC_CFG);
10083 	val &= ~0xff;
10084 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10085 	tw32(GRC_MISC_CFG, val);
10086 
10087 	/* Initialize MBUF/DESC pool. */
10088 	if (tg3_flag(tp, 5750_PLUS)) {
10089 		/* Do nothing.  */
10090 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10091 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10092 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10093 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10094 		else
10095 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10096 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10097 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10098 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10099 		int fw_len;
10100 
10101 		fw_len = tp->fw_len;
10102 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10103 		tw32(BUFMGR_MB_POOL_ADDR,
10104 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10105 		tw32(BUFMGR_MB_POOL_SIZE,
10106 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10107 	}
10108 
10109 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10110 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10111 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10112 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10113 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10114 		tw32(BUFMGR_MB_HIGH_WATER,
10115 		     tp->bufmgr_config.mbuf_high_water);
10116 	} else {
10117 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10118 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10119 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10120 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10121 		tw32(BUFMGR_MB_HIGH_WATER,
10122 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10123 	}
10124 	tw32(BUFMGR_DMA_LOW_WATER,
10125 	     tp->bufmgr_config.dma_low_water);
10126 	tw32(BUFMGR_DMA_HIGH_WATER,
10127 	     tp->bufmgr_config.dma_high_water);
10128 
10129 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10130 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10131 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10132 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10133 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10134 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10135 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10136 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10137 	tw32(BUFMGR_MODE, val);
10138 	for (i = 0; i < 2000; i++) {
10139 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10140 			break;
10141 		udelay(10);
10142 	}
10143 	if (i >= 2000) {
10144 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10145 		return -ENODEV;
10146 	}
10147 
10148 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10149 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10150 
10151 	tg3_setup_rxbd_thresholds(tp);
10152 
10153 	/* Initialize TG3_BDINFO's at:
10154 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10155 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10156 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10157 	 *
10158 	 * like so:
10159 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10160 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10161 	 *                              ring attribute flags
10162 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10163 	 *
10164 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10165 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10166 	 *
10167 	 * The size of each ring is fixed in the firmware, but the location is
10168 	 * configurable.
10169 	 */
10170 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10171 	     ((u64) tpr->rx_std_mapping >> 32));
10172 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10173 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10174 	if (!tg3_flag(tp, 5717_PLUS))
10175 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10176 		     NIC_SRAM_RX_BUFFER_DESC);
10177 
10178 	/* Disable the mini ring */
10179 	if (!tg3_flag(tp, 5705_PLUS))
10180 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10181 		     BDINFO_FLAGS_DISABLED);
10182 
10183 	/* Program the jumbo buffer descriptor ring control
10184 	 * blocks on those devices that have them.
10185 	 */
10186 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10187 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10188 
10189 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10190 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10191 			     ((u64) tpr->rx_jmb_mapping >> 32));
10192 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10193 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10194 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10195 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10196 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10197 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10198 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10199 			    tg3_flag(tp, 57765_CLASS) ||
10200 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10201 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10202 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10203 		} else {
10204 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10205 			     BDINFO_FLAGS_DISABLED);
10206 		}
10207 
10208 		if (tg3_flag(tp, 57765_PLUS)) {
10209 			val = TG3_RX_STD_RING_SIZE(tp);
10210 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10211 			val |= (TG3_RX_STD_DMA_SZ << 2);
10212 		} else
10213 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10214 	} else
10215 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10216 
10217 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10218 
10219 	tpr->rx_std_prod_idx = tp->rx_pending;
10220 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10221 
10222 	tpr->rx_jmb_prod_idx =
10223 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10224 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10225 
10226 	tg3_rings_reset(tp);
10227 
10228 	/* Initialize MAC address and backoff seed. */
10229 	__tg3_set_mac_addr(tp, false);
10230 
10231 	/* MTU + ethernet header + FCS + optional VLAN tag */
10232 	tw32(MAC_RX_MTU_SIZE,
10233 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10234 
10235 	/* The slot time is changed by tg3_setup_phy if we
10236 	 * run at gigabit with half duplex.
10237 	 */
10238 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10239 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10240 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10241 
10242 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10243 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10244 		val |= tr32(MAC_TX_LENGTHS) &
10245 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10246 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10247 
10248 	tw32(MAC_TX_LENGTHS, val);
10249 
10250 	/* Receive rules. */
10251 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10252 	tw32(RCVLPC_CONFIG, 0x0181);
10253 
10254 	/* Calculate RDMAC_MODE setting early, we need it to determine
10255 	 * the RCVLPC_STATE_ENABLE mask.
10256 	 */
10257 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10258 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10259 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10260 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10261 		      RDMAC_MODE_LNGREAD_ENAB);
10262 
10263 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10264 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10265 
10266 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10267 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10268 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10269 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10270 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10271 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10272 
10273 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10274 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10275 		if (tg3_flag(tp, TSO_CAPABLE)) {
10276 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10277 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10278 			   !tg3_flag(tp, IS_5788)) {
10279 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10280 		}
10281 	}
10282 
10283 	if (tg3_flag(tp, PCI_EXPRESS))
10284 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10285 
10286 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10287 		tp->dma_limit = 0;
10288 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10289 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10290 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10291 		}
10292 	}
10293 
10294 	if (tg3_flag(tp, HW_TSO_1) ||
10295 	    tg3_flag(tp, HW_TSO_2) ||
10296 	    tg3_flag(tp, HW_TSO_3))
10297 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10298 
10299 	if (tg3_flag(tp, 57765_PLUS) ||
10300 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10301 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10302 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10303 
10304 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10305 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10306 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10307 
10308 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10309 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10310 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10311 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10312 	    tg3_flag(tp, 57765_PLUS)) {
10313 		u32 tgtreg;
10314 
10315 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10316 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10317 		else
10318 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10319 
10320 		val = tr32(tgtreg);
10321 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10322 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10323 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10324 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10325 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10326 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10327 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10328 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10329 		}
10330 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10331 	}
10332 
10333 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10334 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10335 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10336 		u32 tgtreg;
10337 
10338 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10339 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10340 		else
10341 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10342 
10343 		val = tr32(tgtreg);
10344 		tw32(tgtreg, val |
10345 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10346 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10347 	}
10348 
10349 	/* Receive/send statistics. */
10350 	if (tg3_flag(tp, 5750_PLUS)) {
10351 		val = tr32(RCVLPC_STATS_ENABLE);
10352 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10353 		tw32(RCVLPC_STATS_ENABLE, val);
10354 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10355 		   tg3_flag(tp, TSO_CAPABLE)) {
10356 		val = tr32(RCVLPC_STATS_ENABLE);
10357 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10358 		tw32(RCVLPC_STATS_ENABLE, val);
10359 	} else {
10360 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10361 	}
10362 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10363 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10364 	tw32(SNDDATAI_STATSCTRL,
10365 	     (SNDDATAI_SCTRL_ENABLE |
10366 	      SNDDATAI_SCTRL_FASTUPD));
10367 
10368 	/* Setup host coalescing engine. */
10369 	tw32(HOSTCC_MODE, 0);
10370 	for (i = 0; i < 2000; i++) {
10371 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10372 			break;
10373 		udelay(10);
10374 	}
10375 
10376 	__tg3_set_coalesce(tp, &tp->coal);
10377 
10378 	if (!tg3_flag(tp, 5705_PLUS)) {
10379 		/* Status/statistics block address.  See tg3_timer,
10380 		 * the tg3_periodic_fetch_stats call there, and
10381 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10382 		 */
10383 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10384 		     ((u64) tp->stats_mapping >> 32));
10385 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10386 		     ((u64) tp->stats_mapping & 0xffffffff));
10387 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10388 
10389 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10390 
10391 		/* Clear statistics and status block memory areas */
10392 		for (i = NIC_SRAM_STATS_BLK;
10393 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10394 		     i += sizeof(u32)) {
10395 			tg3_write_mem(tp, i, 0);
10396 			udelay(40);
10397 		}
10398 	}
10399 
10400 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10401 
10402 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10403 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10404 	if (!tg3_flag(tp, 5705_PLUS))
10405 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10406 
10407 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10408 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10409 		/* reset to prevent losing 1st rx packet intermittently */
10410 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10411 		udelay(10);
10412 	}
10413 
10414 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10415 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10416 			MAC_MODE_FHDE_ENABLE;
10417 	if (tg3_flag(tp, ENABLE_APE))
10418 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10419 	if (!tg3_flag(tp, 5705_PLUS) &&
10420 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10421 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10422 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10423 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10424 	udelay(40);
10425 
10426 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10427 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10428 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10429 	 * whether used as inputs or outputs, are set by boot code after
10430 	 * reset.
10431 	 */
10432 	if (!tg3_flag(tp, IS_NIC)) {
10433 		u32 gpio_mask;
10434 
10435 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10436 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10437 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10438 
10439 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10440 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10441 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10442 
10443 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10444 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10445 
10446 		tp->grc_local_ctrl &= ~gpio_mask;
10447 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10448 
10449 		/* GPIO1 must be driven high for eeprom write protect */
10450 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10451 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10452 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10453 	}
10454 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10455 	udelay(100);
10456 
10457 	if (tg3_flag(tp, USING_MSIX)) {
10458 		val = tr32(MSGINT_MODE);
10459 		val |= MSGINT_MODE_ENABLE;
10460 		if (tp->irq_cnt > 1)
10461 			val |= MSGINT_MODE_MULTIVEC_EN;
10462 		if (!tg3_flag(tp, 1SHOT_MSI))
10463 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10464 		tw32(MSGINT_MODE, val);
10465 	}
10466 
10467 	if (!tg3_flag(tp, 5705_PLUS)) {
10468 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10469 		udelay(40);
10470 	}
10471 
10472 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10473 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10474 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10475 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10476 	       WDMAC_MODE_LNGREAD_ENAB);
10477 
10478 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10479 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10480 		if (tg3_flag(tp, TSO_CAPABLE) &&
10481 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10482 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10483 			/* nothing */
10484 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10485 			   !tg3_flag(tp, IS_5788)) {
10486 			val |= WDMAC_MODE_RX_ACCEL;
10487 		}
10488 	}
10489 
10490 	/* Enable host coalescing bug fix */
10491 	if (tg3_flag(tp, 5755_PLUS))
10492 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10493 
10494 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10495 		val |= WDMAC_MODE_BURST_ALL_DATA;
10496 
10497 	tw32_f(WDMAC_MODE, val);
10498 	udelay(40);
10499 
10500 	if (tg3_flag(tp, PCIX_MODE)) {
10501 		u16 pcix_cmd;
10502 
10503 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10504 				     &pcix_cmd);
10505 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10506 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10507 			pcix_cmd |= PCI_X_CMD_READ_2K;
10508 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10509 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10510 			pcix_cmd |= PCI_X_CMD_READ_2K;
10511 		}
10512 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10513 				      pcix_cmd);
10514 	}
10515 
10516 	tw32_f(RDMAC_MODE, rdmac_mode);
10517 	udelay(40);
10518 
10519 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10520 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
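		/* If any read DMA channel's length register exceeds the
		 * device's maximum MTU, arm the LSO read DMA workaround
		 * bit for this chip.
		 */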
10521 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10522 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10523 				break;
10524 		}
10525 		if (i < TG3_NUM_RDMA_CHANNELS) {
10526 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10527 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10528 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10529 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10530 		}
10531 	}
10532 
10533 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10534 	if (!tg3_flag(tp, 5705_PLUS))
10535 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10536 
10537 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10538 		tw32(SNDDATAC_MODE,
10539 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10540 	else
10541 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10542 
10543 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10544 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10545 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10546 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10547 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10548 	tw32(RCVDBDI_MODE, val);
10549 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10550 	if (tg3_flag(tp, HW_TSO_1) ||
10551 	    tg3_flag(tp, HW_TSO_2) ||
10552 	    tg3_flag(tp, HW_TSO_3))
10553 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10554 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10555 	if (tg3_flag(tp, ENABLE_TSS))
10556 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10557 	tw32(SNDBDI_MODE, val);
10558 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10559 
10560 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10561 		err = tg3_load_5701_a0_firmware_fix(tp);
10562 		if (err)
10563 			return err;
10564 	}
10565 
10566 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10567 		/* Ignore any errors for the firmware download. If download
10568 		 * fails, the device will operate with EEE disabled.
10569 		 */
10570 		tg3_load_57766_firmware(tp);
10571 	}
10572 
10573 	if (tg3_flag(tp, TSO_CAPABLE)) {
10574 		err = tg3_load_tso_firmware(tp);
10575 		if (err)
10576 			return err;
10577 	}
10578 
10579 	tp->tx_mode = TX_MODE_ENABLE;
10580 
10581 	if (tg3_flag(tp, 5755_PLUS) ||
10582 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10583 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10584 
10585 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10586 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10587 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10588 		tp->tx_mode &= ~val;
10589 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10590 	}
10591 
10592 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10593 	udelay(100);
10594 
10595 	if (tg3_flag(tp, ENABLE_RSS)) {
10596 		u32 rss_key[10];
10597 
10598 		tg3_rss_write_indir_tbl(tp);
10599 
10600 		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10601 
10602 		for (i = 0; i < 10; i++)
10603 			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10604 	}
10605 
10606 	tp->rx_mode = RX_MODE_ENABLE;
10607 	if (tg3_flag(tp, 5755_PLUS))
10608 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10609 
10610 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10611 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10612 
10613 	if (tg3_flag(tp, ENABLE_RSS))
10614 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10615 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10616 			       RX_MODE_RSS_IPV6_HASH_EN |
10617 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10618 			       RX_MODE_RSS_IPV4_HASH_EN |
10619 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10620 
10621 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10622 	udelay(10);
10623 
10624 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10625 
10626 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10627 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10628 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10629 		udelay(10);
10630 	}
10631 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10632 	udelay(10);
10633 
10634 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10635 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10636 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10637 			/* Set drive transmission level to 1.2V, but only if
10638 			 * the signal pre-emphasis bit is not set. */
10639 			val = tr32(MAC_SERDES_CFG);
10640 			val &= 0xfffff000;
10641 			val |= 0x880;
10642 			tw32(MAC_SERDES_CFG, val);
10643 		}
10644 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10645 			tw32(MAC_SERDES_CFG, 0x616000);
10646 	}
10647 
10648 	/* Prevent chip from dropping frames when flow control
10649 	 * is enabled.
10650 	 */
10651 	if (tg3_flag(tp, 57765_CLASS))
10652 		val = 1;
10653 	else
10654 		val = 2;
10655 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10656 
10657 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10658 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10659 		/* Use hardware link auto-negotiation */
10660 		tg3_flag_set(tp, HW_AUTONEG);
10661 	}
10662 
10663 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10664 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10665 		u32 tmp;
10666 
10667 		tmp = tr32(SERDES_RX_CTRL);
10668 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10669 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10670 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10671 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10672 	}
10673 
10674 	if (!tg3_flag(tp, USE_PHYLIB)) {
10675 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10676 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10677 
10678 		err = tg3_setup_phy(tp, false);
10679 		if (err)
10680 			return err;
10681 
10682 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10683 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10684 			u32 tmp;
10685 
10686 			/* Clear CRC stats. */
10687 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10688 				tg3_writephy(tp, MII_TG3_TEST1,
10689 					     tmp | MII_TG3_TEST1_CRC_EN);
10690 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10691 			}
10692 		}
10693 	}
10694 
10695 	__tg3_set_rx_mode(tp->dev);
10696 
10697 	/* Initialize receive rules. */
10698 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10699 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10700 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10701 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10702 
10703 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10704 		limit = 8;
10705 	else
10706 		limit = 16;
10707 	if (tg3_flag(tp, ENABLE_ASF))
10708 		limit -= 4;
10709 	switch (limit) {
10710 	case 16:
10711 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10712 		fallthrough;
10713 	case 15:
10714 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10715 		fallthrough;
10716 	case 14:
10717 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10718 		fallthrough;
10719 	case 13:
10720 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10721 		fallthrough;
10722 	case 12:
10723 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10724 		fallthrough;
10725 	case 11:
10726 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10727 		fallthrough;
10728 	case 10:
10729 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10730 		fallthrough;
10731 	case 9:
10732 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10733 		fallthrough;
10734 	case 8:
10735 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10736 		fallthrough;
10737 	case 7:
10738 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10739 		fallthrough;
10740 	case 6:
10741 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10742 		fallthrough;
10743 	case 5:
10744 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10745 		fallthrough;
10746 	case 4:
10747 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10748 	case 3:
10749 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10750 	case 2:
10751 	case 1:
10752 
10753 	default:
10754 		break;
10755 	}
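	/* For illustration: on a 5705+ (non-5780-class) chip the driver
	 * owns 8 receive rules; enabling ASF drops limit to 4, so the
	 * switch above writes nothing and rules 4-7 are left to the
	 * firmware.  On a chip with all 16 rules and no ASF, the switch
	 * falls through from case 16 and zeroes rules 15 down to 4;
	 * rules 0 and 1 were programmed just above, and rules 2 and 3
	 * are deliberately left alone.
	 */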
10756 
10757 	if (tg3_flag(tp, ENABLE_APE))
10758 		/* Write our heartbeat update interval to APE. */
10759 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10760 				APE_HOST_HEARTBEAT_INT_5SEC);
10761 
10762 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10763 
10764 	return 0;
10765 }
10766 
10767 /* Called at device open time to get the chip ready for
10768  * packet processing.  Invoked with tp->lock held.
10769  */
10770 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10771 {
10772 	/* Chip may have been just powered on. If so, the boot code may still
10773 	 * be running initialization. Wait for it to finish to avoid races in
10774 	 * accessing the hardware.
10775 	 */
10776 	tg3_enable_register_access(tp);
10777 	tg3_poll_fw(tp);
10778 
10779 	tg3_switch_clocks(tp);
10780 
10781 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10782 
10783 	return tg3_reset_hw(tp, reset_phy);
10784 }
10785 
10786 #ifdef CONFIG_TIGON3_HWMON
10787 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10788 {
10789 	u32 off, len = TG3_OCIR_LEN;
10790 	int i;
10791 
10792 	for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10793 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10794 
10795 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10796 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10797 			memset(ocir, 0, len);
10798 	}
10799 }
10800 
10801 /* sysfs attributes for hwmon */
10802 static ssize_t tg3_show_temp(struct device *dev,
10803 			     struct device_attribute *devattr, char *buf)
10804 {
10805 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10806 	struct tg3 *tp = dev_get_drvdata(dev);
10807 	u32 temperature;
10808 
10809 	spin_lock_bh(&tp->lock);
10810 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10811 				sizeof(temperature));
10812 	spin_unlock_bh(&tp->lock);
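	/* The hwmon sysfs ABI reports temperatures in millidegrees
	 * Celsius, while the APE scratchpad stores whole degrees,
	 * hence the scaling below.
	 */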
10813 	return sprintf(buf, "%u\n", temperature * 1000);
10814 }
10815 
10816 
10817 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10818 			  TG3_TEMP_SENSOR_OFFSET);
10819 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10820 			  TG3_TEMP_CAUTION_OFFSET);
10821 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10822 			  TG3_TEMP_MAX_OFFSET);
10823 
10824 static struct attribute *tg3_attrs[] = {
10825 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10826 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10827 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10828 	NULL
10829 };
10830 ATTRIBUTE_GROUPS(tg3);
10831 
10832 static void tg3_hwmon_close(struct tg3 *tp)
10833 {
10834 	if (tp->hwmon_dev) {
10835 		hwmon_device_unregister(tp->hwmon_dev);
10836 		tp->hwmon_dev = NULL;
10837 	}
10838 }
10839 
10840 static void tg3_hwmon_open(struct tg3 *tp)
10841 {
10842 	int i;
10843 	u32 size = 0;
10844 	struct pci_dev *pdev = tp->pdev;
10845 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10846 
10847 	tg3_sd_scan_scratchpad(tp, ocirs);
10848 
10849 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10850 		if (!ocirs[i].src_data_length)
10851 			continue;
10852 
10853 		size += ocirs[i].src_hdr_length;
10854 		size += ocirs[i].src_data_length;
10855 	}
10856 
10857 	if (!size)
10858 		return;
10859 
10860 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10861 							  tp, tg3_groups);
10862 	if (IS_ERR(tp->hwmon_dev)) {
10863 		tp->hwmon_dev = NULL;
10864 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10865 	}
10866 }
10867 #else
10868 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10869 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10870 #endif /* CONFIG_TIGON3_HWMON */
10871 
10872 
10873 #define TG3_STAT_ADD32(PSTAT, REG) \
10874 do {	u32 __val = tr32(REG); \
10875 	(PSTAT)->low += __val; \
10876 	if ((PSTAT)->low < __val) \
10877 		(PSTAT)->high += 1; \
10878 } while (0)
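/* A minimal stand-alone sketch of the carry handling performed by
 * TG3_STAT_ADD32 above: the hardware counters are only 32 bits wide,
 * so each sample is folded into a 64-bit high/low pair, detecting
 * wraparound of the low word via unsigned overflow.  The type below
 * mirrors tg3_stat64_t for illustration only and is not built.
 */
#if 0
#include <stdint.h>

struct example_stat64 {
	uint32_t high, low;
};

static void example_stat_add32(struct example_stat64 *pstat, uint32_t val)
{
	pstat->low += val;
	/* Unsigned addition wraps modulo 2^32; if the sum ends up
	 * smaller than the addend, the low word carried into bit 32.
	 */
	if (pstat->low < val)
		pstat->high += 1;
}
#endif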
10879 
10880 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10881 {
10882 	struct tg3_hw_stats *sp = tp->hw_stats;
10883 
10884 	if (!tp->link_up)
10885 		return;
10886 
10887 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10888 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10889 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10890 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10891 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10892 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10893 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10894 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10895 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10896 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10897 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10898 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10899 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10900 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10901 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10902 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10903 		u32 val;
10904 
10905 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10906 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10907 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10908 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10909 	}
10910 
10911 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10912 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10913 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10914 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10915 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10916 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10917 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10918 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10919 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10920 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10921 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10922 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10923 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10924 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10925 
10926 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10927 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10928 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
10929 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10930 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10931 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10932 	} else {
10933 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10934 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10935 		if (val) {
10936 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10937 			sp->rx_discards.low += val;
10938 			if (sp->rx_discards.low < val)
10939 				sp->rx_discards.high += 1;
10940 		}
10941 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10942 	}
10943 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10944 }
10945 
10946 static void tg3_chk_missed_msi(struct tg3 *tp)
10947 {
10948 	u32 i;
10949 
10950 	for (i = 0; i < tp->irq_cnt; i++) {
10951 		struct tg3_napi *tnapi = &tp->napi[i];
10952 
10953 		if (tg3_has_work(tnapi)) {
10954 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10955 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10956 				if (tnapi->chk_msi_cnt < 1) {
10957 					tnapi->chk_msi_cnt++;
10958 					return;
10959 				}
10960 				tg3_msi(0, tnapi);
10961 			}
10962 		}
10963 		tnapi->chk_msi_cnt = 0;
10964 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10965 		tnapi->last_tx_cons = tnapi->tx_cons;
10966 	}
10967 }
10968 
10969 static void tg3_timer(struct timer_list *t)
10970 {
10971 	struct tg3 *tp = from_timer(tp, t, timer);
10972 
10973 	spin_lock(&tp->lock);
10974 
10975 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10976 		spin_unlock(&tp->lock);
10977 		goto restart_timer;
10978 	}
10979 
10980 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10981 	    tg3_flag(tp, 57765_CLASS))
10982 		tg3_chk_missed_msi(tp);
10983 
10984 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10985 		/* BCM4785: Flush posted writes from GbE to host memory. */
10986 		tr32(HOSTCC_MODE);
10987 	}
10988 
10989 	if (!tg3_flag(tp, TAGGED_STATUS)) {
10990 		/* All of this garbage is needed because, when using
10991 		 * non-tagged IRQ status, the mailbox/status_block protocol
10992 		 * the chip uses with the CPU is race prone.
10993 		 */
10994 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10995 			tw32(GRC_LOCAL_CTRL,
10996 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10997 		} else {
10998 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10999 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11000 		}
11001 
11002 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11003 			spin_unlock(&tp->lock);
11004 			tg3_reset_task_schedule(tp);
11005 			goto restart_timer;
11006 		}
11007 	}
11008 
11009 	/* This part only runs once per second. */
11010 	if (!--tp->timer_counter) {
11011 		if (tg3_flag(tp, 5705_PLUS))
11012 			tg3_periodic_fetch_stats(tp);
11013 
11014 		if (tp->setlpicnt && !--tp->setlpicnt)
11015 			tg3_phy_eee_enable(tp);
11016 
11017 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
11018 			u32 mac_stat;
11019 			int phy_event;
11020 
11021 			mac_stat = tr32(MAC_STATUS);
11022 
11023 			phy_event = 0;
11024 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11025 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11026 					phy_event = 1;
11027 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11028 				phy_event = 1;
11029 
11030 			if (phy_event)
11031 				tg3_setup_phy(tp, false);
11032 		} else if (tg3_flag(tp, POLL_SERDES)) {
11033 			u32 mac_stat = tr32(MAC_STATUS);
11034 			int need_setup = 0;
11035 
11036 			if (tp->link_up &&
11037 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11038 				need_setup = 1;
11039 			}
11040 			if (!tp->link_up &&
11041 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
11042 					 MAC_STATUS_SIGNAL_DET))) {
11043 				need_setup = 1;
11044 			}
11045 			if (need_setup) {
11046 				if (!tp->serdes_counter) {
11047 					tw32_f(MAC_MODE,
11048 					     (tp->mac_mode &
11049 					      ~MAC_MODE_PORT_MODE_MASK));
11050 					udelay(40);
11051 					tw32_f(MAC_MODE, tp->mac_mode);
11052 					udelay(40);
11053 				}
11054 				tg3_setup_phy(tp, false);
11055 			}
11056 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11057 			   tg3_flag(tp, 5780_CLASS)) {
11058 			tg3_serdes_parallel_detect(tp);
11059 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11060 			u32 cpmu = tr32(TG3_CPMU_STATUS);
11061 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11062 					 TG3_CPMU_STATUS_LINK_MASK);
11063 
11064 			if (link_up != tp->link_up)
11065 				tg3_setup_phy(tp, false);
11066 		}
11067 
11068 		tp->timer_counter = tp->timer_multiplier;
11069 	}
11070 
11071 	/* Heartbeat is only sent once every 2 seconds.
11072 	 *
11073 	 * The heartbeat is to tell the ASF firmware that the host
11074 	 * driver is still alive.  In the event that the OS crashes,
11075 	 * ASF needs to reset the hardware to free up the FIFO space
11076 	 * that may be filled with rx packets destined for the host.
11077 	 * If the FIFO is full, ASF will no longer function properly.
11078 	 *
11079 	 * Unintended resets have been reported on real-time kernels
11080 	 * where the timer doesn't run on time.  Netpoll will also have
11081 	 * the same problem.
11082 	 *
11083 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11084 	 * to check the ring condition when the heartbeat is expiring
11085 	 * before doing the reset.  This will prevent most unintended
11086 	 * resets.
11087 	 */
11088 	if (!--tp->asf_counter) {
11089 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11090 			tg3_wait_for_event_ack(tp);
11091 
11092 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11093 				      FWCMD_NICDRV_ALIVE3);
11094 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11095 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11096 				      TG3_FW_UPDATE_TIMEOUT_SEC);
11097 
11098 			tg3_generate_fw_event(tp);
11099 		}
11100 		tp->asf_counter = tp->asf_multiplier;
11101 	}
11102 
11103 	/* Update the APE heartbeat every 5 seconds. */
11104 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11105 
11106 	spin_unlock(&tp->lock);
11107 
11108 restart_timer:
11109 	tp->timer.expires = jiffies + tp->timer_offset;
11110 	add_timer(&tp->timer);
11111 }
11112 
11113 static void tg3_timer_init(struct tg3 *tp)
11114 {
11115 	if (tg3_flag(tp, TAGGED_STATUS) &&
11116 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11117 	    !tg3_flag(tp, 57765_CLASS))
11118 		tp->timer_offset = HZ;
11119 	else
11120 		tp->timer_offset = HZ / 10;
11121 
11122 	BUG_ON(tp->timer_offset > HZ);
11123 
11124 	tp->timer_multiplier = (HZ / tp->timer_offset);
11125 	tp->asf_multiplier = (HZ / tp->timer_offset) *
11126 			     TG3_FW_UPDATE_FREQ_SEC;
11127 
11128 	timer_setup(&tp->timer, tg3_timer, 0);
11129 }
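/* Worked example of the arithmetic above, assuming HZ=1000 on a chip
 * without tagged status: timer_offset = HZ / 10 = 100 jiffies, so
 * tg3_timer() runs every 100 ms; timer_multiplier = 10 makes the
 * once-per-second block execute on every tenth tick, and
 * asf_multiplier stretches that further to one ASF heartbeat every
 * TG3_FW_UPDATE_FREQ_SEC seconds.
 */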
11130 
11131 static void tg3_timer_start(struct tg3 *tp)
11132 {
11133 	tp->asf_counter   = tp->asf_multiplier;
11134 	tp->timer_counter = tp->timer_multiplier;
11135 
11136 	tp->timer.expires = jiffies + tp->timer_offset;
11137 	add_timer(&tp->timer);
11138 }
11139 
11140 static void tg3_timer_stop(struct tg3 *tp)
11141 {
11142 	del_timer_sync(&tp->timer);
11143 }
11144 
11145 /* Restart hardware after configuration changes, self-test, etc.
11146  * Invoked with tp->lock held.
11147  */
11148 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11149 	__releases(tp->lock)
11150 	__acquires(tp->lock)
11151 {
11152 	int err;
11153 
11154 	err = tg3_init_hw(tp, reset_phy);
11155 	if (err) {
11156 		netdev_err(tp->dev,
11157 			   "Failed to re-initialize device, aborting\n");
11158 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11159 		tg3_full_unlock(tp);
11160 		tg3_timer_stop(tp);
11161 		tp->irq_sync = 0;
11162 		tg3_napi_enable(tp);
11163 		dev_close(tp->dev);
11164 		tg3_full_lock(tp, 0);
11165 	}
11166 	return err;
11167 }
11168 
11169 static void tg3_reset_task(struct work_struct *work)
11170 {
11171 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11172 	int err;
11173 
11174 	rtnl_lock();
11175 	tg3_full_lock(tp, 0);
11176 
11177 	if (!netif_running(tp->dev)) {
11178 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11179 		tg3_full_unlock(tp);
11180 		rtnl_unlock();
11181 		return;
11182 	}
11183 
11184 	tg3_full_unlock(tp);
11185 
11186 	tg3_phy_stop(tp);
11187 
11188 	tg3_netif_stop(tp);
11189 
11190 	tg3_full_lock(tp, 1);
11191 
11192 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11193 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11194 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11195 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11196 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11197 	}
11198 
11199 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11200 	err = tg3_init_hw(tp, true);
11201 	if (err) {
11202 		tg3_full_unlock(tp);
11203 		tp->irq_sync = 0;
11204 		tg3_napi_enable(tp);
11205 		/* Clear this flag so that tg3_reset_task_cancel() will not
11206 		 * call cancel_work_sync() and wait forever.
11207 		 */
11208 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11209 		dev_close(tp->dev);
11210 		goto out;
11211 	}
11212 
11213 	tg3_netif_start(tp);
11214 	tg3_full_unlock(tp);
11215 	tg3_phy_start(tp);
11216 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11217 out:
11218 	rtnl_unlock();
11219 }
11220 
11221 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11222 {
11223 	irq_handler_t fn;
11224 	unsigned long flags;
11225 	char *name;
11226 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11227 
11228 	if (tp->irq_cnt == 1)
11229 		name = tp->dev->name;
11230 	else {
11231 		name = &tnapi->irq_lbl[0];
11232 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11233 			snprintf(name, IFNAMSIZ,
11234 				 "%s-txrx-%d", tp->dev->name, irq_num);
11235 		else if (tnapi->tx_buffers)
11236 			snprintf(name, IFNAMSIZ,
11237 				 "%s-tx-%d", tp->dev->name, irq_num);
11238 		else if (tnapi->rx_rcb)
11239 			snprintf(name, IFNAMSIZ,
11240 				 "%s-rx-%d", tp->dev->name, irq_num);
11241 		else
11242 			snprintf(name, IFNAMSIZ,
11243 				 "%s-%d", tp->dev->name, irq_num);
11244 		name[IFNAMSIZ-1] = 0;
11245 	}
11246 
11247 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11248 		fn = tg3_msi;
11249 		if (tg3_flag(tp, 1SHOT_MSI))
11250 			fn = tg3_msi_1shot;
11251 		flags = 0;
11252 	} else {
11253 		fn = tg3_interrupt;
11254 		if (tg3_flag(tp, TAGGED_STATUS))
11255 			fn = tg3_interrupt_tagged;
11256 		flags = IRQF_SHARED;
11257 	}
11258 
11259 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11260 }
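/* For reference, the vector names built in tg3_request_irq() look
 * like "eth0-txrx-1" for a combined ring, "eth0-tx-1"/"eth0-rx-1"
 * for dedicated rings, and "eth0-1" for a vector with neither; the
 * interface name here is only an example, and the suffix is the
 * MSI-X vector index as it appears in /proc/interrupts.
 */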
11261 
11262 static int tg3_test_interrupt(struct tg3 *tp)
11263 {
11264 	struct tg3_napi *tnapi = &tp->napi[0];
11265 	struct net_device *dev = tp->dev;
11266 	int err, i, intr_ok = 0;
11267 	u32 val;
11268 
11269 	if (!netif_running(dev))
11270 		return -ENODEV;
11271 
11272 	tg3_disable_ints(tp);
11273 
11274 	free_irq(tnapi->irq_vec, tnapi);
11275 
11276 	/*
11277 	 * Turn off MSI one shot mode.  Otherwise this test has no
11278 	 * observable way to know whether the interrupt was delivered.
11279 	 */
11280 	if (tg3_flag(tp, 57765_PLUS)) {
11281 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11282 		tw32(MSGINT_MODE, val);
11283 	}
11284 
11285 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11286 			  IRQF_SHARED, dev->name, tnapi);
11287 	if (err)
11288 		return err;
11289 
11290 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11291 	tg3_enable_ints(tp);
11292 
11293 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11294 	       tnapi->coal_now);
11295 
11296 	for (i = 0; i < 5; i++) {
11297 		u32 int_mbox, misc_host_ctrl;
11298 
11299 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11300 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11301 
11302 		if ((int_mbox != 0) ||
11303 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11304 			intr_ok = 1;
11305 			break;
11306 		}
11307 
11308 		if (tg3_flag(tp, 57765_PLUS) &&
11309 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11310 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11311 
11312 		msleep(10);
11313 	}
11314 
11315 	tg3_disable_ints(tp);
11316 
11317 	free_irq(tnapi->irq_vec, tnapi);
11318 
11319 	err = tg3_request_irq(tp, 0);
11320 
11321 	if (err)
11322 		return err;
11323 
11324 	if (intr_ok) {
11325 		/* Reenable MSI one shot mode. */
11326 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11327 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11328 			tw32(MSGINT_MODE, val);
11329 		}
11330 		return 0;
11331 	}
11332 
11333 	return -EIO;
11334 }
11335 
11336 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11337  * INTx mode is successfully restored.
11338  */
11339 static int tg3_test_msi(struct tg3 *tp)
11340 {
11341 	int err;
11342 	u16 pci_cmd;
11343 
11344 	if (!tg3_flag(tp, USING_MSI))
11345 		return 0;
11346 
11347 	/* Turn off SERR reporting in case MSI terminates with Master
11348 	 * Abort.
11349 	 */
11350 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11351 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11352 			      pci_cmd & ~PCI_COMMAND_SERR);
11353 
11354 	err = tg3_test_interrupt(tp);
11355 
11356 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11357 
11358 	if (!err)
11359 		return 0;
11360 
11361 	/* other failures */
11362 	if (err != -EIO)
11363 		return err;
11364 
11365 	/* MSI test failed, go back to INTx mode */
11366 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11367 		    "to INTx mode. Please report this failure to the PCI "
11368 		    "maintainer and include system chipset information\n");
11369 
11370 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11371 
11372 	pci_disable_msi(tp->pdev);
11373 
11374 	tg3_flag_clear(tp, USING_MSI);
11375 	tp->napi[0].irq_vec = tp->pdev->irq;
11376 
11377 	err = tg3_request_irq(tp, 0);
11378 	if (err)
11379 		return err;
11380 
11381 	/* Need to reset the chip because the MSI cycle may have terminated
11382 	 * with Master Abort.
11383 	 */
11384 	tg3_full_lock(tp, 1);
11385 
11386 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11387 	err = tg3_init_hw(tp, true);
11388 
11389 	tg3_full_unlock(tp);
11390 
11391 	if (err)
11392 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11393 
11394 	return err;
11395 }
11396 
11397 static int tg3_request_firmware(struct tg3 *tp)
11398 {
11399 	const struct tg3_firmware_hdr *fw_hdr;
11400 
11401 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11402 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11403 			   tp->fw_needed);
11404 		return -ENOENT;
11405 	}
11406 
11407 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11408 
11409 	/* Firmware blob starts with version numbers, followed by
11410 	 * start address and _full_ length including BSS sections
11411 	 * (which must be longer than the actual data, of course).
11412 	 */
11413 
11414 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11415 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11416 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11417 			   tp->fw_len, tp->fw_needed);
11418 		release_firmware(tp->fw);
11419 		tp->fw = NULL;
11420 		return -EINVAL;
11421 	}
11422 
11423 	/* We no longer need firmware; we have it. */
11424 	tp->fw_needed = NULL;
11425 	return 0;
11426 }
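/* A sketch of the length check above, with made-up numbers: for a
 * 4096-byte firmware file, the payload after the header is
 * (4096 - TG3_FW_HDR_LEN) bytes.  fw_hdr->len must be at least that
 * large, because it also counts the BSS that is zero-filled at load
 * time and therefore can only exceed the bytes shipped in the blob.
 */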
11427 
11428 static u32 tg3_irq_count(struct tg3 *tp)
11429 {
11430 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11431 
11432 	if (irq_cnt > 1) {
11433 		/* We want as many rx rings enabled as there are cpus.
11434 		 * In multiqueue MSI-X mode, the first MSI-X vector
11435 		 * only deals with link interrupts, etc, so we add
11436 		 * one to the number of vectors we are requesting.
11437 		 */
11438 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11439 	}
11440 
11441 	return irq_cnt;
11442 }
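/* Example of the computation in tg3_irq_count(): with rxq_cnt = 4,
 * txq_cnt = 1 and irq_max = 5, irq_cnt = min(4 + 1, 5) = 5 -- vector
 * 0 is reserved for link and other non-ring events, while vectors
 * 1-4 service the RX rings.
 */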
11443 
11444 static bool tg3_enable_msix(struct tg3 *tp)
11445 {
11446 	int i, rc;
11447 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11448 
11449 	tp->txq_cnt = tp->txq_req;
11450 	tp->rxq_cnt = tp->rxq_req;
11451 	if (!tp->rxq_cnt)
11452 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11453 	if (tp->rxq_cnt > tp->rxq_max)
11454 		tp->rxq_cnt = tp->rxq_max;
11455 
11456 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11457 	 * scheduling of the TX rings can cause starvation of rings with
11458 	 * small packets when other rings have TSO or jumbo packets.
11459 	 */
11460 	if (!tp->txq_req)
11461 		tp->txq_cnt = 1;
11462 
11463 	tp->irq_cnt = tg3_irq_count(tp);
11464 
11465 	for (i = 0; i < tp->irq_max; i++) {
11466 		msix_ent[i].entry  = i;
11467 		msix_ent[i].vector = 0;
11468 	}
11469 
11470 	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11471 	if (rc < 0) {
11472 		return false;
11473 	} else if (rc < tp->irq_cnt) {
11474 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11475 			      tp->irq_cnt, rc);
11476 		tp->irq_cnt = rc;
11477 		tp->rxq_cnt = max(rc - 1, 1);
11478 		if (tp->txq_cnt)
11479 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11480 	}
11481 
11482 	for (i = 0; i < tp->irq_max; i++)
11483 		tp->napi[i].irq_vec = msix_ent[i].vector;
11484 
11485 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11486 		pci_disable_msix(tp->pdev);
11487 		return false;
11488 	}
11489 
11490 	if (tp->irq_cnt == 1)
11491 		return true;
11492 
11493 	tg3_flag_set(tp, ENABLE_RSS);
11494 
11495 	if (tp->txq_cnt > 1)
11496 		tg3_flag_set(tp, ENABLE_TSS);
11497 
11498 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11499 
11500 	return true;
11501 }
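/* A note on the negotiation above: pci_enable_msix_range(pdev, ent,
 * 1, n) may grant anywhere from 1 to n vectors and returns the count
 * actually allocated.  If, say, only 3 of 5 requested vectors are
 * granted, the driver shrinks to irq_cnt = 3 and
 * rxq_cnt = max(3 - 1, 1) = 2 RX rings instead of failing outright.
 */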
11502 
11503 static void tg3_ints_init(struct tg3 *tp)
11504 {
11505 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11506 	    !tg3_flag(tp, TAGGED_STATUS)) {
11507 		/* All MSI supporting chips should support tagged
11508 		 * status.  Assert that this is the case.
11509 		 */
11510 		netdev_warn(tp->dev,
11511 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11512 		goto defcfg;
11513 	}
11514 
11515 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11516 		tg3_flag_set(tp, USING_MSIX);
11517 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11518 		tg3_flag_set(tp, USING_MSI);
11519 
11520 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11521 		u32 msi_mode = tr32(MSGINT_MODE);
11522 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11523 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11524 		if (!tg3_flag(tp, 1SHOT_MSI))
11525 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11526 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11527 	}
11528 defcfg:
11529 	if (!tg3_flag(tp, USING_MSIX)) {
11530 		tp->irq_cnt = 1;
11531 		tp->napi[0].irq_vec = tp->pdev->irq;
11532 	}
11533 
11534 	if (tp->irq_cnt == 1) {
11535 		tp->txq_cnt = 1;
11536 		tp->rxq_cnt = 1;
11537 		netif_set_real_num_tx_queues(tp->dev, 1);
11538 		netif_set_real_num_rx_queues(tp->dev, 1);
11539 	}
11540 }
11541 
11542 static void tg3_ints_fini(struct tg3 *tp)
11543 {
11544 	if (tg3_flag(tp, USING_MSIX))
11545 		pci_disable_msix(tp->pdev);
11546 	else if (tg3_flag(tp, USING_MSI))
11547 		pci_disable_msi(tp->pdev);
11548 	tg3_flag_clear(tp, USING_MSI);
11549 	tg3_flag_clear(tp, USING_MSIX);
11550 	tg3_flag_clear(tp, ENABLE_RSS);
11551 	tg3_flag_clear(tp, ENABLE_TSS);
11552 }
11553 
11554 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11555 		     bool init)
11556 {
11557 	struct net_device *dev = tp->dev;
11558 	int i, err;
11559 
11560 	/*
11561 	 * Setup interrupts first so we know how
11562 	 * many NAPI resources to allocate
11563 	 */
11564 	tg3_ints_init(tp);
11565 
11566 	tg3_rss_check_indir_tbl(tp);
11567 
11568 	/* The placement of this call is tied
11569 	 * to the setup and use of Host TX descriptors.
11570 	 */
11571 	err = tg3_alloc_consistent(tp);
11572 	if (err)
11573 		goto out_ints_fini;
11574 
11575 	tg3_napi_init(tp);
11576 
11577 	tg3_napi_enable(tp);
11578 
11579 	for (i = 0; i < tp->irq_cnt; i++) {
11580 		err = tg3_request_irq(tp, i);
11581 		if (err) {
11582 			for (i--; i >= 0; i--) {
11583 				struct tg3_napi *tnapi = &tp->napi[i];
11584 
11585 				free_irq(tnapi->irq_vec, tnapi);
11586 			}
11587 			goto out_napi_fini;
11588 		}
11589 	}
11590 
11591 	tg3_full_lock(tp, 0);
11592 
11593 	if (init)
11594 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11595 
11596 	err = tg3_init_hw(tp, reset_phy);
11597 	if (err) {
11598 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11599 		tg3_free_rings(tp);
11600 	}
11601 
11602 	tg3_full_unlock(tp);
11603 
11604 	if (err)
11605 		goto out_free_irq;
11606 
11607 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11608 		err = tg3_test_msi(tp);
11609 
11610 		if (err) {
11611 			tg3_full_lock(tp, 0);
11612 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11613 			tg3_free_rings(tp);
11614 			tg3_full_unlock(tp);
11615 
11616 			goto out_napi_fini;
11617 		}
11618 
11619 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11620 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11621 
11622 			tw32(PCIE_TRANSACTION_CFG,
11623 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11624 		}
11625 	}
11626 
11627 	tg3_phy_start(tp);
11628 
11629 	tg3_hwmon_open(tp);
11630 
11631 	tg3_full_lock(tp, 0);
11632 
11633 	tg3_timer_start(tp);
11634 	tg3_flag_set(tp, INIT_COMPLETE);
11635 	tg3_enable_ints(tp);
11636 
11637 	tg3_ptp_resume(tp);
11638 
11639 	tg3_full_unlock(tp);
11640 
11641 	netif_tx_start_all_queues(dev);
11642 
11643 	/*
11644 	 * Reset the loopback feature if it was turned on while the device
11645 	 * was down; make sure that it's installed properly now.
11646 	 */
11647 	if (dev->features & NETIF_F_LOOPBACK)
11648 		tg3_set_loopback(dev, dev->features);
11649 
11650 	return 0;
11651 
11652 out_free_irq:
11653 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11654 		struct tg3_napi *tnapi = &tp->napi[i];
11655 		free_irq(tnapi->irq_vec, tnapi);
11656 	}
11657 
11658 out_napi_fini:
11659 	tg3_napi_disable(tp);
11660 	tg3_napi_fini(tp);
11661 	tg3_free_consistent(tp);
11662 
11663 out_ints_fini:
11664 	tg3_ints_fini(tp);
11665 
11666 	return err;
11667 }
11668 
11669 static void tg3_stop(struct tg3 *tp)
11670 {
11671 	int i;
11672 
11673 	tg3_reset_task_cancel(tp);
11674 	tg3_netif_stop(tp);
11675 
11676 	tg3_timer_stop(tp);
11677 
11678 	tg3_hwmon_close(tp);
11679 
11680 	tg3_phy_stop(tp);
11681 
11682 	tg3_full_lock(tp, 1);
11683 
11684 	tg3_disable_ints(tp);
11685 
11686 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11687 	tg3_free_rings(tp);
11688 	tg3_flag_clear(tp, INIT_COMPLETE);
11689 
11690 	tg3_full_unlock(tp);
11691 
11692 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11693 		struct tg3_napi *tnapi = &tp->napi[i];
11694 		free_irq(tnapi->irq_vec, tnapi);
11695 	}
11696 
11697 	tg3_ints_fini(tp);
11698 
11699 	tg3_napi_fini(tp);
11700 
11701 	tg3_free_consistent(tp);
11702 }
11703 
11704 static int tg3_open(struct net_device *dev)
11705 {
11706 	struct tg3 *tp = netdev_priv(dev);
11707 	int err;
11708 
11709 	if (tp->pcierr_recovery) {
11710 		netdev_err(dev, "Failed to open device. PCI error recovery "
11711 			   "in progress\n");
11712 		return -EAGAIN;
11713 	}
11714 
11715 	if (tp->fw_needed) {
11716 		err = tg3_request_firmware(tp);
11717 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11718 			if (err) {
11719 				netdev_warn(tp->dev, "EEE capability disabled\n");
11720 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11721 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11722 				netdev_warn(tp->dev, "EEE capability restored\n");
11723 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11724 			}
11725 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11726 			if (err)
11727 				return err;
11728 		} else if (err) {
11729 			netdev_warn(tp->dev, "TSO capability disabled\n");
11730 			tg3_flag_clear(tp, TSO_CAPABLE);
11731 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11732 			netdev_notice(tp->dev, "TSO capability restored\n");
11733 			tg3_flag_set(tp, TSO_CAPABLE);
11734 		}
11735 	}
11736 
11737 	tg3_carrier_off(tp);
11738 
11739 	err = tg3_power_up(tp);
11740 	if (err)
11741 		return err;
11742 
11743 	tg3_full_lock(tp, 0);
11744 
11745 	tg3_disable_ints(tp);
11746 	tg3_flag_clear(tp, INIT_COMPLETE);
11747 
11748 	tg3_full_unlock(tp);
11749 
11750 	err = tg3_start(tp,
11751 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11752 			true, true);
11753 	if (err) {
11754 		tg3_frob_aux_power(tp, false);
11755 		pci_set_power_state(tp->pdev, PCI_D3hot);
11756 	}
11757 
11758 	return err;
11759 }
11760 
11761 static int tg3_close(struct net_device *dev)
11762 {
11763 	struct tg3 *tp = netdev_priv(dev);
11764 
11765 	if (tp->pcierr_recovery) {
11766 		netdev_err(dev, "Failed to close device. PCI error recovery "
11767 			   "in progress\n");
11768 		return -EAGAIN;
11769 	}
11770 
11771 	tg3_stop(tp);
11772 
11773 	if (pci_device_is_present(tp->pdev)) {
11774 		tg3_power_down_prepare(tp);
11775 
11776 		tg3_carrier_off(tp);
11777 	}
11778 	return 0;
11779 }
11780 
11781 static inline u64 get_stat64(tg3_stat64_t *val)
11782 {
11783 	return ((u64)val->high << 32) | ((u64)val->low);
11784 }
11785 
11786 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11787 {
11788 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11789 
11790 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11791 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11792 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11793 		u32 val;
11794 
11795 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11796 			tg3_writephy(tp, MII_TG3_TEST1,
11797 				     val | MII_TG3_TEST1_CRC_EN);
11798 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11799 		} else
11800 			val = 0;
11801 
11802 		tp->phy_crc_errors += val;
11803 
11804 		return tp->phy_crc_errors;
11805 	}
11806 
11807 	return get_stat64(&hw_stats->rx_fcs_errors);
11808 }
11809 
11810 #define ESTAT_ADD(member) \
11811 	estats->member =	old_estats->member + \
11812 				get_stat64(&hw_stats->member)
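/* For clarity, ESTAT_ADD(rx_octets) below expands to
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool statistic is the total saved across the last
 * reset plus the live 64-bit hardware counter.
 */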
11813 
11814 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11815 {
11816 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11817 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11818 
11819 	ESTAT_ADD(rx_octets);
11820 	ESTAT_ADD(rx_fragments);
11821 	ESTAT_ADD(rx_ucast_packets);
11822 	ESTAT_ADD(rx_mcast_packets);
11823 	ESTAT_ADD(rx_bcast_packets);
11824 	ESTAT_ADD(rx_fcs_errors);
11825 	ESTAT_ADD(rx_align_errors);
11826 	ESTAT_ADD(rx_xon_pause_rcvd);
11827 	ESTAT_ADD(rx_xoff_pause_rcvd);
11828 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11829 	ESTAT_ADD(rx_xoff_entered);
11830 	ESTAT_ADD(rx_frame_too_long_errors);
11831 	ESTAT_ADD(rx_jabbers);
11832 	ESTAT_ADD(rx_undersize_packets);
11833 	ESTAT_ADD(rx_in_length_errors);
11834 	ESTAT_ADD(rx_out_length_errors);
11835 	ESTAT_ADD(rx_64_or_less_octet_packets);
11836 	ESTAT_ADD(rx_65_to_127_octet_packets);
11837 	ESTAT_ADD(rx_128_to_255_octet_packets);
11838 	ESTAT_ADD(rx_256_to_511_octet_packets);
11839 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11840 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11841 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11842 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11843 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11844 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11845 
11846 	ESTAT_ADD(tx_octets);
11847 	ESTAT_ADD(tx_collisions);
11848 	ESTAT_ADD(tx_xon_sent);
11849 	ESTAT_ADD(tx_xoff_sent);
11850 	ESTAT_ADD(tx_flow_control);
11851 	ESTAT_ADD(tx_mac_errors);
11852 	ESTAT_ADD(tx_single_collisions);
11853 	ESTAT_ADD(tx_mult_collisions);
11854 	ESTAT_ADD(tx_deferred);
11855 	ESTAT_ADD(tx_excessive_collisions);
11856 	ESTAT_ADD(tx_late_collisions);
11857 	ESTAT_ADD(tx_collide_2times);
11858 	ESTAT_ADD(tx_collide_3times);
11859 	ESTAT_ADD(tx_collide_4times);
11860 	ESTAT_ADD(tx_collide_5times);
11861 	ESTAT_ADD(tx_collide_6times);
11862 	ESTAT_ADD(tx_collide_7times);
11863 	ESTAT_ADD(tx_collide_8times);
11864 	ESTAT_ADD(tx_collide_9times);
11865 	ESTAT_ADD(tx_collide_10times);
11866 	ESTAT_ADD(tx_collide_11times);
11867 	ESTAT_ADD(tx_collide_12times);
11868 	ESTAT_ADD(tx_collide_13times);
11869 	ESTAT_ADD(tx_collide_14times);
11870 	ESTAT_ADD(tx_collide_15times);
11871 	ESTAT_ADD(tx_ucast_packets);
11872 	ESTAT_ADD(tx_mcast_packets);
11873 	ESTAT_ADD(tx_bcast_packets);
11874 	ESTAT_ADD(tx_carrier_sense_errors);
11875 	ESTAT_ADD(tx_discards);
11876 	ESTAT_ADD(tx_errors);
11877 
11878 	ESTAT_ADD(dma_writeq_full);
11879 	ESTAT_ADD(dma_write_prioq_full);
11880 	ESTAT_ADD(rxbds_empty);
11881 	ESTAT_ADD(rx_discards);
11882 	ESTAT_ADD(rx_errors);
11883 	ESTAT_ADD(rx_threshold_hit);
11884 
11885 	ESTAT_ADD(dma_readq_full);
11886 	ESTAT_ADD(dma_read_prioq_full);
11887 	ESTAT_ADD(tx_comp_queue_full);
11888 
11889 	ESTAT_ADD(ring_set_send_prod_index);
11890 	ESTAT_ADD(ring_status_update);
11891 	ESTAT_ADD(nic_irqs);
11892 	ESTAT_ADD(nic_avoided_irqs);
11893 	ESTAT_ADD(nic_tx_threshold_hit);
11894 
11895 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11896 }
11897 
11898 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11899 {
11900 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11901 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11902 
11903 	stats->rx_packets = old_stats->rx_packets +
11904 		get_stat64(&hw_stats->rx_ucast_packets) +
11905 		get_stat64(&hw_stats->rx_mcast_packets) +
11906 		get_stat64(&hw_stats->rx_bcast_packets);
11907 
11908 	stats->tx_packets = old_stats->tx_packets +
11909 		get_stat64(&hw_stats->tx_ucast_packets) +
11910 		get_stat64(&hw_stats->tx_mcast_packets) +
11911 		get_stat64(&hw_stats->tx_bcast_packets);
11912 
11913 	stats->rx_bytes = old_stats->rx_bytes +
11914 		get_stat64(&hw_stats->rx_octets);
11915 	stats->tx_bytes = old_stats->tx_bytes +
11916 		get_stat64(&hw_stats->tx_octets);
11917 
11918 	stats->rx_errors = old_stats->rx_errors +
11919 		get_stat64(&hw_stats->rx_errors);
11920 	stats->tx_errors = old_stats->tx_errors +
11921 		get_stat64(&hw_stats->tx_errors) +
11922 		get_stat64(&hw_stats->tx_mac_errors) +
11923 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11924 		get_stat64(&hw_stats->tx_discards);
11925 
11926 	stats->multicast = old_stats->multicast +
11927 		get_stat64(&hw_stats->rx_mcast_packets);
11928 	stats->collisions = old_stats->collisions +
11929 		get_stat64(&hw_stats->tx_collisions);
11930 
11931 	stats->rx_length_errors = old_stats->rx_length_errors +
11932 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11933 		get_stat64(&hw_stats->rx_undersize_packets);
11934 
11935 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11936 		get_stat64(&hw_stats->rx_align_errors);
11937 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11938 		get_stat64(&hw_stats->tx_discards);
11939 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11940 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11941 
11942 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11943 		tg3_calc_crc_errors(tp);
11944 
11945 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11946 		get_stat64(&hw_stats->rx_discards);
11947 
11948 	stats->rx_dropped = tp->rx_dropped;
11949 	stats->tx_dropped = tp->tx_dropped;
11950 }
11951 
11952 static int tg3_get_regs_len(struct net_device *dev)
11953 {
11954 	return TG3_REG_BLK_SIZE;
11955 }
11956 
11957 static void tg3_get_regs(struct net_device *dev,
11958 		struct ethtool_regs *regs, void *_p)
11959 {
11960 	struct tg3 *tp = netdev_priv(dev);
11961 
11962 	regs->version = 0;
11963 
11964 	memset(_p, 0, TG3_REG_BLK_SIZE);
11965 
11966 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11967 		return;
11968 
11969 	tg3_full_lock(tp, 0);
11970 
11971 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11972 
11973 	tg3_full_unlock(tp);
11974 }
11975 
11976 static int tg3_get_eeprom_len(struct net_device *dev)
11977 {
11978 	struct tg3 *tp = netdev_priv(dev);
11979 
11980 	return tp->nvram_size;
11981 }
11982 
11983 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11984 {
11985 	struct tg3 *tp = netdev_priv(dev);
11986 	int ret, cpmu_restore = 0;
11987 	u8  *pd;
11988 	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11989 	__be32 val;
11990 
11991 	if (tg3_flag(tp, NO_NVRAM))
11992 		return -EINVAL;
11993 
11994 	offset = eeprom->offset;
11995 	len = eeprom->len;
11996 	eeprom->len = 0;
11997 
11998 	eeprom->magic = TG3_EEPROM_MAGIC;
11999 
12000 	/* Override clock, link aware and link idle modes */
12001 	if (tg3_flag(tp, CPMU_PRESENT)) {
12002 		cpmu_val = tr32(TG3_CPMU_CTRL);
12003 		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12004 				CPMU_CTRL_LINK_IDLE_MODE)) {
12005 			tw32(TG3_CPMU_CTRL, cpmu_val &
12006 					    ~(CPMU_CTRL_LINK_AWARE_MODE |
12007 					     CPMU_CTRL_LINK_IDLE_MODE));
12008 			cpmu_restore = 1;
12009 		}
12010 	}
12011 	tg3_override_clk(tp);
12012 
12013 	if (offset & 3) {
12014 		/* adjustments to start on required 4 byte boundary */
12015 		b_offset = offset & 3;
12016 		b_count = 4 - b_offset;
12017 		if (b_count > len) {
12018 			/* i.e. offset=1 len=2 */
12019 			b_count = len;
12020 		}
12021 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12022 		if (ret)
12023 			goto eeprom_done;
12024 		memcpy(data, ((char *)&val) + b_offset, b_count);
12025 		len -= b_count;
12026 		offset += b_count;
12027 		eeprom->len += b_count;
12028 	}
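	/* Worked example of the head fixup above: a request with
	 * offset=1 and len=2 gives b_offset=1 and b_count clamped to
	 * 2, so the aligned word at offset 0 is read and bytes 1-2 of
	 * it are copied out; len then drops to 0 and both the aligned
	 * loop and the tail handling below are skipped.
	 */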
12029 
12030 	/* read bytes up to the last 4 byte boundary */
12031 	pd = &data[eeprom->len];
12032 	for (i = 0; i < (len - (len & 3)); i += 4) {
12033 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
12034 		if (ret) {
12035 			if (i)
12036 				i -= 4;
12037 			eeprom->len += i;
12038 			goto eeprom_done;
12039 		}
12040 		memcpy(pd + i, &val, 4);
12041 		if (need_resched()) {
12042 			if (signal_pending(current)) {
12043 				eeprom->len += i;
12044 				ret = -EINTR;
12045 				goto eeprom_done;
12046 			}
12047 			cond_resched();
12048 		}
12049 	}
12050 	eeprom->len += i;
12051 
12052 	if (len & 3) {
12053 		/* read last bytes not ending on 4 byte boundary */
12054 		pd = &data[eeprom->len];
12055 		b_count = len & 3;
12056 		b_offset = offset + len - b_count;
12057 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12058 		if (ret)
12059 			goto eeprom_done;
12060 		memcpy(pd, &val, b_count);
12061 		eeprom->len += b_count;
12062 	}
12063 	ret = 0;
12064 
12065 eeprom_done:
12066 	/* Restore clock, link aware and link idle modes */
12067 	tg3_restore_clk(tp);
12068 	if (cpmu_restore)
12069 		tw32(TG3_CPMU_CTRL, cpmu_val);
12070 
12071 	return ret;
12072 }
12073 
12074 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12075 {
12076 	struct tg3 *tp = netdev_priv(dev);
12077 	int ret;
12078 	u32 offset, len, b_offset, odd_len;
12079 	u8 *buf;
12080 	__be32 start = 0, end;
12081 
12082 	if (tg3_flag(tp, NO_NVRAM) ||
12083 	    eeprom->magic != TG3_EEPROM_MAGIC)
12084 		return -EINVAL;
12085 
12086 	offset = eeprom->offset;
12087 	len = eeprom->len;
12088 
12089 	if ((b_offset = (offset & 3))) {
12090 		/* adjustments to start on required 4 byte boundary */
12091 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12092 		if (ret)
12093 			return ret;
12094 		len += b_offset;
12095 		offset &= ~3;
12096 		if (len < 4)
12097 			len = 4;
12098 	}
12099 
12100 	odd_len = 0;
12101 	if (len & 3) {
12102 		/* adjustments to end on required 4 byte boundary */
12103 		odd_len = 1;
12104 		len = (len + 3) & ~3;
12105 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12106 		if (ret)
12107 			return ret;
12108 	}
12109 
12110 	buf = data;
12111 	if (b_offset || odd_len) {
12112 		buf = kmalloc(len, GFP_KERNEL);
12113 		if (!buf)
12114 			return -ENOMEM;
12115 		if (b_offset)
12116 			memcpy(buf, &start, 4);
12117 		if (odd_len)
12118 			memcpy(buf+len-4, &end, 4);
12119 		memcpy(buf + b_offset, data, eeprom->len);
12120 	}
12121 
12122 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12123 
12124 	if (buf != data)
12125 		kfree(buf);
12126 
12127 	return ret;
12128 }
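/* A sketch of the read-modify-write strategy in tg3_set_eeprom(),
 * with hypothetical numbers: writing 3 bytes at offset 5 yields
 * b_offset = 1, so the word at offset 4 is read into 'start', len
 * grows to 4 and offset is rounded down to 4.  The bounce buffer is
 * then assembled as one preserved NVRAM byte followed by the three
 * caller bytes, and written back as a single aligned 4-byte block;
 * an odd trailing length is patched up the same way via 'end'.
 */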
12129 
12130 static int tg3_get_link_ksettings(struct net_device *dev,
12131 				  struct ethtool_link_ksettings *cmd)
12132 {
12133 	struct tg3 *tp = netdev_priv(dev);
12134 	u32 supported, advertising;
12135 
12136 	if (tg3_flag(tp, USE_PHYLIB)) {
12137 		struct phy_device *phydev;
12138 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12139 			return -EAGAIN;
12140 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12141 		phy_ethtool_ksettings_get(phydev, cmd);
12142 
12143 		return 0;
12144 	}
12145 
12146 	supported = (SUPPORTED_Autoneg);
12147 
12148 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12149 		supported |= (SUPPORTED_1000baseT_Half |
12150 			      SUPPORTED_1000baseT_Full);
12151 
12152 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12153 		supported |= (SUPPORTED_100baseT_Half |
12154 			      SUPPORTED_100baseT_Full |
12155 			      SUPPORTED_10baseT_Half |
12156 			      SUPPORTED_10baseT_Full |
12157 			      SUPPORTED_TP);
12158 		cmd->base.port = PORT_TP;
12159 	} else {
12160 		supported |= SUPPORTED_FIBRE;
12161 		cmd->base.port = PORT_FIBRE;
12162 	}
12163 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12164 						supported);
12165 
12166 	advertising = tp->link_config.advertising;
12167 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12168 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12169 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12170 				advertising |= ADVERTISED_Pause;
12171 			} else {
12172 				advertising |= ADVERTISED_Pause |
12173 					ADVERTISED_Asym_Pause;
12174 			}
12175 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12176 			advertising |= ADVERTISED_Asym_Pause;
12177 		}
12178 	}
12179 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12180 						advertising);
12181 
12182 	if (netif_running(dev) && tp->link_up) {
12183 		cmd->base.speed = tp->link_config.active_speed;
12184 		cmd->base.duplex = tp->link_config.active_duplex;
12185 		ethtool_convert_legacy_u32_to_link_mode(
12186 			cmd->link_modes.lp_advertising,
12187 			tp->link_config.rmt_adv);
12188 
12189 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12190 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12191 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12192 			else
12193 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12194 		}
12195 	} else {
12196 		cmd->base.speed = SPEED_UNKNOWN;
12197 		cmd->base.duplex = DUPLEX_UNKNOWN;
12198 		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12199 	}
12200 	cmd->base.phy_address = tp->phy_addr;
12201 	cmd->base.autoneg = tp->link_config.autoneg;
12202 	return 0;
12203 }
12204 
12205 static int tg3_set_link_ksettings(struct net_device *dev,
12206 				  const struct ethtool_link_ksettings *cmd)
12207 {
12208 	struct tg3 *tp = netdev_priv(dev);
12209 	u32 speed = cmd->base.speed;
12210 	u32 advertising;
12211 
12212 	if (tg3_flag(tp, USE_PHYLIB)) {
12213 		struct phy_device *phydev;
12214 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12215 			return -EAGAIN;
12216 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12217 		return phy_ethtool_ksettings_set(phydev, cmd);
12218 	}
12219 
12220 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12221 	    cmd->base.autoneg != AUTONEG_DISABLE)
12222 		return -EINVAL;
12223 
12224 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12225 	    cmd->base.duplex != DUPLEX_FULL &&
12226 	    cmd->base.duplex != DUPLEX_HALF)
12227 		return -EINVAL;
12228 
12229 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12230 						cmd->link_modes.advertising);
12231 
12232 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12233 		u32 mask = ADVERTISED_Autoneg |
12234 			   ADVERTISED_Pause |
12235 			   ADVERTISED_Asym_Pause;
12236 
12237 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12238 			mask |= ADVERTISED_1000baseT_Half |
12239 				ADVERTISED_1000baseT_Full;
12240 
12241 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12242 			mask |= ADVERTISED_100baseT_Half |
12243 				ADVERTISED_100baseT_Full |
12244 				ADVERTISED_10baseT_Half |
12245 				ADVERTISED_10baseT_Full |
12246 				ADVERTISED_TP;
12247 		else
12248 			mask |= ADVERTISED_FIBRE;
12249 
12250 		if (advertising & ~mask)
12251 			return -EINVAL;
12252 
12253 		mask &= (ADVERTISED_1000baseT_Half |
12254 			 ADVERTISED_1000baseT_Full |
12255 			 ADVERTISED_100baseT_Half |
12256 			 ADVERTISED_100baseT_Full |
12257 			 ADVERTISED_10baseT_Half |
12258 			 ADVERTISED_10baseT_Full);
12259 
12260 		advertising &= mask;
12261 	} else {
12262 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12263 			if (speed != SPEED_1000)
12264 				return -EINVAL;
12265 
12266 			if (cmd->base.duplex != DUPLEX_FULL)
12267 				return -EINVAL;
12268 		} else {
12269 			if (speed != SPEED_100 &&
12270 			    speed != SPEED_10)
12271 				return -EINVAL;
12272 		}
12273 	}
12274 
12275 	tg3_full_lock(tp, 0);
12276 
12277 	tp->link_config.autoneg = cmd->base.autoneg;
12278 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12279 		tp->link_config.advertising = (advertising |
12280 					      ADVERTISED_Autoneg);
12281 		tp->link_config.speed = SPEED_UNKNOWN;
12282 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12283 	} else {
12284 		tp->link_config.advertising = 0;
12285 		tp->link_config.speed = speed;
12286 		tp->link_config.duplex = cmd->base.duplex;
12287 	}
12288 
12289 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12290 
12291 	tg3_warn_mgmt_link_flap(tp);
12292 
12293 	if (netif_running(dev))
12294 		tg3_setup_phy(tp, true);
12295 
12296 	tg3_full_unlock(tp);
12297 
12298 	return 0;
12299 }
12300 
12301 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12302 {
12303 	struct tg3 *tp = netdev_priv(dev);
12304 
12305 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12306 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12307 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12308 }
12309 
12310 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12311 {
12312 	struct tg3 *tp = netdev_priv(dev);
12313 
12314 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12315 		wol->supported = WAKE_MAGIC;
12316 	else
12317 		wol->supported = 0;
12318 	wol->wolopts = 0;
12319 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12320 		wol->wolopts = WAKE_MAGIC;
12321 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12322 }
12323 
12324 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12325 {
12326 	struct tg3 *tp = netdev_priv(dev);
12327 	struct device *dp = &tp->pdev->dev;
12328 
12329 	if (wol->wolopts & ~WAKE_MAGIC)
12330 		return -EINVAL;
12331 	if ((wol->wolopts & WAKE_MAGIC) &&
12332 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12333 		return -EINVAL;
12334 
12335 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12336 
12337 	if (device_may_wakeup(dp))
12338 		tg3_flag_set(tp, WOL_ENABLE);
12339 	else
12340 		tg3_flag_clear(tp, WOL_ENABLE);
12341 
12342 	return 0;
12343 }
12344 
12345 static u32 tg3_get_msglevel(struct net_device *dev)
12346 {
12347 	struct tg3 *tp = netdev_priv(dev);
12348 	return tp->msg_enable;
12349 }
12350 
12351 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12352 {
12353 	struct tg3 *tp = netdev_priv(dev);
12354 	tp->msg_enable = value;
12355 }
12356 
12357 static int tg3_nway_reset(struct net_device *dev)
12358 {
12359 	struct tg3 *tp = netdev_priv(dev);
12360 	int r;
12361 
12362 	if (!netif_running(dev))
12363 		return -EAGAIN;
12364 
12365 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12366 		return -EINVAL;
12367 
12368 	tg3_warn_mgmt_link_flap(tp);
12369 
12370 	if (tg3_flag(tp, USE_PHYLIB)) {
12371 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12372 			return -EAGAIN;
12373 		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12374 	} else {
12375 		u32 bmcr;
12376 
12377 		spin_lock_bh(&tp->lock);
12378 		r = -EINVAL;
12379 		tg3_readphy(tp, MII_BMCR, &bmcr);
12380 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12381 		    ((bmcr & BMCR_ANENABLE) ||
12382 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12383 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12384 						   BMCR_ANENABLE);
12385 			r = 0;
12386 		}
12387 		spin_unlock_bh(&tp->lock);
12388 	}
12389 
12390 	return r;
12391 }
12392 
12393 static void tg3_get_ringparam(struct net_device *dev,
12394 			      struct ethtool_ringparam *ering,
12395 			      struct kernel_ethtool_ringparam *kernel_ering,
12396 			      struct netlink_ext_ack *extack)
12397 {
12398 	struct tg3 *tp = netdev_priv(dev);
12399 
12400 	ering->rx_max_pending = tp->rx_std_ring_mask;
12401 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12402 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12403 	else
12404 		ering->rx_jumbo_max_pending = 0;
12405 
12406 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12407 
12408 	ering->rx_pending = tp->rx_pending;
12409 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12410 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12411 	else
12412 		ering->rx_jumbo_pending = 0;
12413 
12414 	ering->tx_pending = tp->napi[0].tx_pending;
12415 }
12416 
12417 static int tg3_set_ringparam(struct net_device *dev,
12418 			     struct ethtool_ringparam *ering,
12419 			     struct kernel_ethtool_ringparam *kernel_ering,
12420 			     struct netlink_ext_ack *extack)
12421 {
12422 	struct tg3 *tp = netdev_priv(dev);
12423 	int i, irq_sync = 0, err = 0;
12424 	bool reset_phy = false;
12425 
12426 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12427 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12428 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12429 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12430 	    (tg3_flag(tp, TSO_BUG) &&
12431 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12432 		return -EINVAL;
12433 
12434 	if (netif_running(dev)) {
12435 		tg3_phy_stop(tp);
12436 		tg3_netif_stop(tp);
12437 		irq_sync = 1;
12438 	}
12439 
12440 	tg3_full_lock(tp, irq_sync);
12441 
12442 	tp->rx_pending = ering->rx_pending;
12443 
12444 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12445 	    tp->rx_pending > 63)
12446 		tp->rx_pending = 63;
12447 
12448 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12449 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12450 
12451 	for (i = 0; i < tp->irq_max; i++)
12452 		tp->napi[i].tx_pending = ering->tx_pending;
12453 
12454 	if (netif_running(dev)) {
12455 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12456 		/* Reset PHY to avoid PHY lock up */
12457 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12458 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12459 		    tg3_asic_rev(tp) == ASIC_REV_5720)
12460 			reset_phy = true;
12461 
12462 		err = tg3_restart_hw(tp, reset_phy);
12463 		if (!err)
12464 			tg3_netif_start(tp);
12465 	}
12466 
12467 	tg3_full_unlock(tp);
12468 
12469 	if (irq_sync && !err)
12470 		tg3_phy_start(tp);
12471 
12472 	return err;
12473 }
12474 
12475 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12476 {
12477 	struct tg3 *tp = netdev_priv(dev);
12478 
12479 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12480 
12481 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12482 		epause->rx_pause = 1;
12483 	else
12484 		epause->rx_pause = 0;
12485 
12486 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12487 		epause->tx_pause = 1;
12488 	else
12489 		epause->tx_pause = 0;
12490 }
12491 
12492 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12493 {
12494 	struct tg3 *tp = netdev_priv(dev);
12495 	int err = 0;
12496 	bool reset_phy = false;
12497 
12498 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12499 		tg3_warn_mgmt_link_flap(tp);
12500 
12501 	if (tg3_flag(tp, USE_PHYLIB)) {
12502 		struct phy_device *phydev;
12503 
12504 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12505 
12506 		if (!phy_validate_pause(phydev, epause))
12507 			return -EINVAL;
12508 
12509 		tp->link_config.flowctrl = 0;
12510 		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12511 		if (epause->rx_pause) {
12512 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12513 
12514 			if (epause->tx_pause) {
12515 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12516 			}
12517 		} else if (epause->tx_pause) {
12518 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12519 		}
12520 
12521 		if (epause->autoneg)
12522 			tg3_flag_set(tp, PAUSE_AUTONEG);
12523 		else
12524 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12525 
12526 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12527 			if (phydev->autoneg) {
12528 				/* phy_set_asym_pause() will
12529 				 * renegotiate the link to inform our
12530 				 * link partner of our flow control
12531 				 * settings, even if the flow control
12532 				 * is forced.  Let tg3_adjust_link()
12533 				 * do the final flow control setup.
12534 				 */
12535 				return 0;
12536 			}
12537 
12538 			if (!epause->autoneg)
12539 				tg3_setup_flow_control(tp, 0, 0);
12540 		}
12541 	} else {
12542 		int irq_sync = 0;
12543 
12544 		if (netif_running(dev)) {
12545 			tg3_netif_stop(tp);
12546 			irq_sync = 1;
12547 		}
12548 
12549 		tg3_full_lock(tp, irq_sync);
12550 
12551 		if (epause->autoneg)
12552 			tg3_flag_set(tp, PAUSE_AUTONEG);
12553 		else
12554 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12555 		if (epause->rx_pause)
12556 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12557 		else
12558 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12559 		if (epause->tx_pause)
12560 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12561 		else
12562 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12563 
12564 		if (netif_running(dev)) {
12565 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12566 			/* Reset PHY to avoid PHY lock up */
12567 			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12568 			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12569 			    tg3_asic_rev(tp) == ASIC_REV_5720)
12570 				reset_phy = true;
12571 
12572 			err = tg3_restart_hw(tp, reset_phy);
12573 			if (!err)
12574 				tg3_netif_start(tp);
12575 		}
12576 
12577 		tg3_full_unlock(tp);
12578 	}
12579 
12580 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12581 
12582 	return err;
12583 }
12584 
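/* Usage note: tg3_set_pauseparam() above services
 * "ethtool -A <iface> autoneg on|off rx on|off tx on|off"; the three
 * knobs map to epause->autoneg, epause->rx_pause and epause->tx_pause.
 */
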
12585 static int tg3_get_sset_count(struct net_device *dev, int sset)
12586 {
12587 	switch (sset) {
12588 	case ETH_SS_TEST:
12589 		return TG3_NUM_TEST;
12590 	case ETH_SS_STATS:
12591 		return TG3_NUM_STATS;
12592 	default:
12593 		return -EOPNOTSUPP;
12594 	}
12595 }
12596 
12597 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12598 			 u32 *rules __always_unused)
12599 {
12600 	struct tg3 *tp = netdev_priv(dev);
12601 
12602 	if (!tg3_flag(tp, SUPPORT_MSIX))
12603 		return -EOPNOTSUPP;
12604 
12605 	switch (info->cmd) {
12606 	case ETHTOOL_GRXRINGS:
12607 		if (netif_running(tp->dev))
12608 			info->data = tp->rxq_cnt;
12609 		else {
12610 			info->data = num_online_cpus();
12611 			if (info->data > TG3_RSS_MAX_NUM_QS)
12612 				info->data = TG3_RSS_MAX_NUM_QS;
12613 		}
12614 
12615 		return 0;
12616 
12617 	default:
12618 		return -EOPNOTSUPP;
12619 	}
12620 }
12621 
12622 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12623 {
12624 	u32 size = 0;
12625 	struct tg3 *tp = netdev_priv(dev);
12626 
12627 	if (tg3_flag(tp, SUPPORT_MSIX))
12628 		size = TG3_RSS_INDIR_TBL_SIZE;
12629 
12630 	return size;
12631 }
12632 
12633 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12634 {
12635 	struct tg3 *tp = netdev_priv(dev);
12636 	int i;
12637 
12638 	if (hfunc)
12639 		*hfunc = ETH_RSS_HASH_TOP;
12640 	if (!indir)
12641 		return 0;
12642 
12643 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12644 		indir[i] = tp->rss_ind_tbl[i];
12645 
12646 	return 0;
12647 }
12648 
12649 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12650 			const u8 hfunc)
12651 {
12652 	struct tg3 *tp = netdev_priv(dev);
12653 	size_t i;
12654 
12655 	/* We require at least one supported parameter to be changed and no
12656 	 * change in any of the unsupported parameters
12657 	 */
12658 	if (key ||
12659 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12660 		return -EOPNOTSUPP;
12661 
12662 	if (!indir)
12663 		return 0;
12664 
12665 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12666 		tp->rss_ind_tbl[i] = indir[i];
12667 
12668 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12669 		return 0;
12670 
12671 	/* It is legal to write the indirection
12672 	 * table while the device is running.
12673 	 */
12674 	tg3_full_lock(tp, 0);
12675 	tg3_rss_write_indir_tbl(tp);
12676 	tg3_full_unlock(tp);
12677 
12678 	return 0;
12679 }
12680 
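#if 0
/* Minimal sketch, not driver code: the conventional round-robin default
 * that user-supplied tables written through tg3_set_rxfh() above
 * replace.  ethtool_rxfh_indir_default() from <linux/ethtool.h> is
 * simply index % n_rx_rings, so RSS buckets are spread evenly across
 * the receive queues.
 */
static void example_fill_default_indir(u32 *indir, u32 tbl_size,
				       u32 n_rx_rings)
{
	u32 i;

	for (i = 0; i < tbl_size; i++)
		indir[i] = ethtool_rxfh_indir_default(i, n_rx_rings);
}
#endif
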
12681 static void tg3_get_channels(struct net_device *dev,
12682 			     struct ethtool_channels *channel)
12683 {
12684 	struct tg3 *tp = netdev_priv(dev);
12685 	u32 deflt_qs = netif_get_num_default_rss_queues();
12686 
12687 	channel->max_rx = tp->rxq_max;
12688 	channel->max_tx = tp->txq_max;
12689 
12690 	if (netif_running(dev)) {
12691 		channel->rx_count = tp->rxq_cnt;
12692 		channel->tx_count = tp->txq_cnt;
12693 	} else {
12694 		if (tp->rxq_req)
12695 			channel->rx_count = tp->rxq_req;
12696 		else
12697 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12698 
12699 		if (tp->txq_req)
12700 			channel->tx_count = tp->txq_req;
12701 		else
12702 			channel->tx_count = min(deflt_qs, tp->txq_max);
12703 	}
12704 }
12705 
12706 static int tg3_set_channels(struct net_device *dev,
12707 			    struct ethtool_channels *channel)
12708 {
12709 	struct tg3 *tp = netdev_priv(dev);
12710 
12711 	if (!tg3_flag(tp, SUPPORT_MSIX))
12712 		return -EOPNOTSUPP;
12713 
12714 	if (channel->rx_count > tp->rxq_max ||
12715 	    channel->tx_count > tp->txq_max)
12716 		return -EINVAL;
12717 
12718 	tp->rxq_req = channel->rx_count;
12719 	tp->txq_req = channel->tx_count;
12720 
12721 	if (!netif_running(dev))
12722 		return 0;
12723 
12724 	tg3_stop(tp);
12725 
12726 	tg3_carrier_off(tp);
12727 
12728 	tg3_start(tp, true, false, false);
12729 
12730 	return 0;
12731 }
12732 
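/* Usage note: tg3_get_channels()/tg3_set_channels() above back
 * "ethtool -l <iface>" and "ethtool -L <iface> rx N tx M"; changing the
 * counts on a running device restarts it via tg3_stop()/tg3_start().
 */
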
12733 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12734 {
12735 	switch (stringset) {
12736 	case ETH_SS_STATS:
12737 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12738 		break;
12739 	case ETH_SS_TEST:
12740 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12741 		break;
12742 	default:
12743 		WARN_ON(1);	/* we need a WARN() */
12744 		break;
12745 	}
12746 }
12747 
12748 static int tg3_set_phys_id(struct net_device *dev,
12749 			    enum ethtool_phys_id_state state)
12750 {
12751 	struct tg3 *tp = netdev_priv(dev);
12752 
12753 	switch (state) {
12754 	case ETHTOOL_ID_ACTIVE:
12755 		return 1;	/* cycle on/off once per second */
12756 
12757 	case ETHTOOL_ID_ON:
12758 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12759 		     LED_CTRL_1000MBPS_ON |
12760 		     LED_CTRL_100MBPS_ON |
12761 		     LED_CTRL_10MBPS_ON |
12762 		     LED_CTRL_TRAFFIC_OVERRIDE |
12763 		     LED_CTRL_TRAFFIC_BLINK |
12764 		     LED_CTRL_TRAFFIC_LED);
12765 		break;
12766 
12767 	case ETHTOOL_ID_OFF:
12768 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12769 		     LED_CTRL_TRAFFIC_OVERRIDE);
12770 		break;
12771 
12772 	case ETHTOOL_ID_INACTIVE:
12773 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12774 		break;
12775 	}
12776 
12777 	return 0;
12778 }
12779 
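/* Usage note: tg3_set_phys_id() above backs "ethtool -p <iface> [sec]".
 * Returning 1 for ETHTOOL_ID_ACTIVE asks the ethtool core to call back
 * with ETHTOOL_ID_ON/ETHTOOL_ID_OFF, cycling the LED once per second,
 * and with ETHTOOL_ID_INACTIVE to restore the normal LED behavior.
 */
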
12780 static void tg3_get_ethtool_stats(struct net_device *dev,
12781 				   struct ethtool_stats *estats, u64 *tmp_stats)
12782 {
12783 	struct tg3 *tp = netdev_priv(dev);
12784 
12785 	if (tp->hw_stats)
12786 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12787 	else
12788 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12789 }
12790 
12791 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12792 {
12793 	int i;
12794 	__be32 *buf;
12795 	u32 offset = 0, len = 0;
12796 	u32 magic, val;
12797 
12798 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12799 		return NULL;
12800 
12801 	if (magic == TG3_EEPROM_MAGIC) {
12802 		for (offset = TG3_NVM_DIR_START;
12803 		     offset < TG3_NVM_DIR_END;
12804 		     offset += TG3_NVM_DIRENT_SIZE) {
12805 			if (tg3_nvram_read(tp, offset, &val))
12806 				return NULL;
12807 
12808 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12809 			    TG3_NVM_DIRTYPE_EXTVPD)
12810 				break;
12811 		}
12812 
12813 		if (offset != TG3_NVM_DIR_END) {
12814 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12815 			if (tg3_nvram_read(tp, offset + 4, &offset))
12816 				return NULL;
12817 
12818 			offset = tg3_nvram_logical_addr(tp, offset);
12819 		}
12820 
12821 		if (!offset || !len) {
12822 			offset = TG3_NVM_VPD_OFF;
12823 			len = TG3_NVM_VPD_LEN;
12824 		}
12825 
12826 		buf = kmalloc(len, GFP_KERNEL);
12827 		if (!buf)
12828 			return NULL;
12829 
12830 		for (i = 0; i < len; i += 4) {
12831 			/* The data is in little-endian format in NVRAM.
12832 			 * Use the big-endian read routines to preserve
12833 			 * the byte order as it exists in NVRAM.
12834 			 */
12835 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12836 				goto error;
12837 		}
12838 		*vpdlen = len;
12839 	} else {
12840 		buf = pci_vpd_alloc(tp->pdev, vpdlen);
12841 		if (IS_ERR(buf))
12842 			return NULL;
12843 	}
12844 
12845 	return buf;
12846 
12847 error:
12848 	kfree(buf);
12849 	return NULL;
12850 }
12851 
12852 #define NVRAM_TEST_SIZE 0x100
12853 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12854 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12855 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12856 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12857 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12858 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12859 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12860 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12861 
12862 static int tg3_test_nvram(struct tg3 *tp)
12863 {
12864 	u32 csum, magic;
12865 	__be32 *buf;
12866 	int i, j, k, err = 0, size;
12867 	unsigned int len;
12868 
12869 	if (tg3_flag(tp, NO_NVRAM))
12870 		return 0;
12871 
12872 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12873 		return -EIO;
12874 
12875 	if (magic == TG3_EEPROM_MAGIC)
12876 		size = NVRAM_TEST_SIZE;
12877 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12878 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12879 		    TG3_EEPROM_SB_FORMAT_1) {
12880 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12881 			case TG3_EEPROM_SB_REVISION_0:
12882 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12883 				break;
12884 			case TG3_EEPROM_SB_REVISION_2:
12885 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12886 				break;
12887 			case TG3_EEPROM_SB_REVISION_3:
12888 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12889 				break;
12890 			case TG3_EEPROM_SB_REVISION_4:
12891 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12892 				break;
12893 			case TG3_EEPROM_SB_REVISION_5:
12894 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12895 				break;
12896 			case TG3_EEPROM_SB_REVISION_6:
12897 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12898 				break;
12899 			default:
12900 				return -EIO;
12901 			}
12902 		} else
12903 			return 0;
12904 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12905 		size = NVRAM_SELFBOOT_HW_SIZE;
12906 	else
12907 		return -EIO;
12908 
12909 	buf = kmalloc(size, GFP_KERNEL);
12910 	if (buf == NULL)
12911 		return -ENOMEM;
12912 
12913 	err = -EIO;
12914 	for (i = 0, j = 0; i < size; i += 4, j++) {
12915 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12916 		if (err)
12917 			break;
12918 	}
12919 	if (i < size)
12920 		goto out;
12921 
12922 	/* Selfboot format */
12923 	magic = be32_to_cpu(buf[0]);
12924 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12925 	    TG3_EEPROM_MAGIC_FW) {
12926 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12927 
12928 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12929 		    TG3_EEPROM_SB_REVISION_2) {
12930 			/* For rev 2, the csum doesn't include the MBA. */
12931 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12932 				csum8 += buf8[i];
12933 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12934 				csum8 += buf8[i];
12935 		} else {
12936 			for (i = 0; i < size; i++)
12937 				csum8 += buf8[i];
12938 		}
12939 
12940 		if (csum8 == 0) {
12941 			err = 0;
12942 			goto out;
12943 		}
12944 
12945 		err = -EIO;
12946 		goto out;
12947 	}
12948 
12949 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12950 	    TG3_EEPROM_MAGIC_HW) {
12951 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12952 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12953 		u8 *buf8 = (u8 *) buf;
12954 
12955 		/* Separate the parity bits and the data bytes.  */
12956 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12957 			if ((i == 0) || (i == 8)) {
12958 				int l;
12959 				u8 msk;
12960 
12961 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12962 					parity[k++] = buf8[i] & msk;
12963 				i++;
12964 			} else if (i == 16) {
12965 				int l;
12966 				u8 msk;
12967 
12968 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12969 					parity[k++] = buf8[i] & msk;
12970 				i++;
12971 
12972 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12973 					parity[k++] = buf8[i] & msk;
12974 				i++;
12975 			}
12976 			data[j++] = buf8[i];
12977 		}
12978 
12979 		err = -EIO;
12980 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12981 			u8 hw8 = hweight8(data[i]);
12982 
12983 			if ((hw8 & 0x1) && parity[i])
12984 				goto out;
12985 			else if (!(hw8 & 0x1) && !parity[i])
12986 				goto out;
12987 		}
12988 		err = 0;
12989 		goto out;
12990 	}
12991 
12992 	err = -EIO;
12993 
12994 	/* Bootstrap checksum at offset 0x10 */
12995 	csum = calc_crc((unsigned char *) buf, 0x10);
12996 	if (csum != le32_to_cpu(buf[0x10/4]))
12997 		goto out;
12998 
12999 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13000 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13001 	if (csum != le32_to_cpu(buf[0xfc/4]))
13002 		goto out;
13003 
13004 	kfree(buf);
13005 
13006 	buf = tg3_vpd_readblock(tp, &len);
13007 	if (!buf)
13008 		return -ENOMEM;
13009 
13010 	err = pci_vpd_check_csum(buf, len);
13011 	/* go on if no checksum found */
13012 	if (err == 1)
13013 		err = 0;
13014 out:
13015 	kfree(buf);
13016 	return err;
13017 }
13018 
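#if 0
/* Sketch of the self-boot checksum rule applied above, assuming the
 * common (non rev-2) layout: the image is valid when the 8-bit sum of
 * every byte, including the stored checksum byte, wraps to zero.
 */
static bool example_selfboot_csum_ok(const u8 *img, int size)
{
	u8 csum8 = 0;
	int i;

	for (i = 0; i < size; i++)
		csum8 += img[i];

	return csum8 == 0;	/* rev 2 additionally skips the MBA word */
}
#endif
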
13019 #define TG3_SERDES_TIMEOUT_SEC	2
13020 #define TG3_COPPER_TIMEOUT_SEC	6
13021 
13022 static int tg3_test_link(struct tg3 *tp)
13023 {
13024 	int i, max;
13025 
13026 	if (!netif_running(tp->dev))
13027 		return -ENODEV;
13028 
13029 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13030 		max = TG3_SERDES_TIMEOUT_SEC;
13031 	else
13032 		max = TG3_COPPER_TIMEOUT_SEC;
13033 
13034 	for (i = 0; i < max; i++) {
13035 		if (tp->link_up)
13036 			return 0;
13037 
13038 		if (msleep_interruptible(1000))
13039 			break;
13040 	}
13041 
13042 	return -EIO;
13043 }
13044 
13045 /* Only test the commonly used registers */
13046 static int tg3_test_registers(struct tg3 *tp)
13047 {
13048 	int i, is_5705, is_5750;
13049 	u32 offset, read_mask, write_mask, val, save_val, read_val;
13050 	static struct {
13051 		u16 offset;
13052 		u16 flags;
13053 #define TG3_FL_5705	0x1
13054 #define TG3_FL_NOT_5705	0x2
13055 #define TG3_FL_NOT_5788	0x4
13056 #define TG3_FL_NOT_5750	0x8
13057 		u32 read_mask;
13058 		u32 write_mask;
13059 	} reg_tbl[] = {
13060 		/* MAC Control Registers */
13061 		{ MAC_MODE, TG3_FL_NOT_5705,
13062 			0x00000000, 0x00ef6f8c },
13063 		{ MAC_MODE, TG3_FL_5705,
13064 			0x00000000, 0x01ef6b8c },
13065 		{ MAC_STATUS, TG3_FL_NOT_5705,
13066 			0x03800107, 0x00000000 },
13067 		{ MAC_STATUS, TG3_FL_5705,
13068 			0x03800100, 0x00000000 },
13069 		{ MAC_ADDR_0_HIGH, 0x0000,
13070 			0x00000000, 0x0000ffff },
13071 		{ MAC_ADDR_0_LOW, 0x0000,
13072 			0x00000000, 0xffffffff },
13073 		{ MAC_RX_MTU_SIZE, 0x0000,
13074 			0x00000000, 0x0000ffff },
13075 		{ MAC_TX_MODE, 0x0000,
13076 			0x00000000, 0x00000070 },
13077 		{ MAC_TX_LENGTHS, 0x0000,
13078 			0x00000000, 0x00003fff },
13079 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13080 			0x00000000, 0x000007fc },
13081 		{ MAC_RX_MODE, TG3_FL_5705,
13082 			0x00000000, 0x000007dc },
13083 		{ MAC_HASH_REG_0, 0x0000,
13084 			0x00000000, 0xffffffff },
13085 		{ MAC_HASH_REG_1, 0x0000,
13086 			0x00000000, 0xffffffff },
13087 		{ MAC_HASH_REG_2, 0x0000,
13088 			0x00000000, 0xffffffff },
13089 		{ MAC_HASH_REG_3, 0x0000,
13090 			0x00000000, 0xffffffff },
13091 
13092 		/* Receive Data and Receive BD Initiator Control Registers. */
13093 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13094 			0x00000000, 0xffffffff },
13095 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13096 			0x00000000, 0xffffffff },
13097 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13098 			0x00000000, 0x00000003 },
13099 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13100 			0x00000000, 0xffffffff },
13101 		{ RCVDBDI_STD_BD+0, 0x0000,
13102 			0x00000000, 0xffffffff },
13103 		{ RCVDBDI_STD_BD+4, 0x0000,
13104 			0x00000000, 0xffffffff },
13105 		{ RCVDBDI_STD_BD+8, 0x0000,
13106 			0x00000000, 0xffff0002 },
13107 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13108 			0x00000000, 0xffffffff },
13109 
13110 		/* Receive BD Initiator Control Registers. */
13111 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13112 			0x00000000, 0xffffffff },
13113 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13114 			0x00000000, 0x000003ff },
13115 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13116 			0x00000000, 0xffffffff },
13117 
13118 		/* Host Coalescing Control Registers. */
13119 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13120 			0x00000000, 0x00000004 },
13121 		{ HOSTCC_MODE, TG3_FL_5705,
13122 			0x00000000, 0x000000f6 },
13123 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13124 			0x00000000, 0xffffffff },
13125 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13126 			0x00000000, 0x000003ff },
13127 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13128 			0x00000000, 0xffffffff },
13129 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13130 			0x00000000, 0x000003ff },
13131 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13132 			0x00000000, 0xffffffff },
13133 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13134 			0x00000000, 0x000000ff },
13135 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13136 			0x00000000, 0xffffffff },
13137 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13138 			0x00000000, 0x000000ff },
13139 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13140 			0x00000000, 0xffffffff },
13141 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13142 			0x00000000, 0xffffffff },
13143 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13144 			0x00000000, 0xffffffff },
13145 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13146 			0x00000000, 0x000000ff },
13147 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13148 			0x00000000, 0xffffffff },
13149 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13150 			0x00000000, 0x000000ff },
13151 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13152 			0x00000000, 0xffffffff },
13153 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13154 			0x00000000, 0xffffffff },
13155 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13156 			0x00000000, 0xffffffff },
13157 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13158 			0x00000000, 0xffffffff },
13159 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13160 			0x00000000, 0xffffffff },
13161 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13162 			0xffffffff, 0x00000000 },
13163 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13164 			0xffffffff, 0x00000000 },
13165 
13166 		/* Buffer Manager Control Registers. */
13167 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13168 			0x00000000, 0x007fff80 },
13169 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13170 			0x00000000, 0x007fffff },
13171 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13172 			0x00000000, 0x0000003f },
13173 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13174 			0x00000000, 0x000001ff },
13175 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13176 			0x00000000, 0x000001ff },
13177 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13178 			0xffffffff, 0x00000000 },
13179 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13180 			0xffffffff, 0x00000000 },
13181 
13182 		/* Mailbox Registers */
13183 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13184 			0x00000000, 0x000001ff },
13185 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13186 			0x00000000, 0x000001ff },
13187 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13188 			0x00000000, 0x000007ff },
13189 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13190 			0x00000000, 0x000001ff },
13191 
13192 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13193 	};
13194 
13195 	is_5705 = is_5750 = 0;
13196 	if (tg3_flag(tp, 5705_PLUS)) {
13197 		is_5705 = 1;
13198 		if (tg3_flag(tp, 5750_PLUS))
13199 			is_5750 = 1;
13200 	}
13201 
13202 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13203 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13204 			continue;
13205 
13206 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13207 			continue;
13208 
13209 		if (tg3_flag(tp, IS_5788) &&
13210 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13211 			continue;
13212 
13213 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13214 			continue;
13215 
13216 		offset = (u32) reg_tbl[i].offset;
13217 		read_mask = reg_tbl[i].read_mask;
13218 		write_mask = reg_tbl[i].write_mask;
13219 
13220 		/* Save the original register content */
13221 		save_val = tr32(offset);
13222 
13223 		/* Determine the read-only value. */
13224 		read_val = save_val & read_mask;
13225 
13226 		/* Write zero to the register, then make sure the read-only bits
13227 		 * are not changed and the read/write bits are all zeros.
13228 		 */
13229 		tw32(offset, 0);
13230 
13231 		val = tr32(offset);
13232 
13233 		/* Test the read-only and read/write bits. */
13234 		if (((val & read_mask) != read_val) || (val & write_mask))
13235 			goto out;
13236 
13237 		/* Write ones to all the bits defined by RdMask and WrMask, then
13238 		 * make sure the read-only bits are not changed and the
13239 		 * read/write bits are all ones.
13240 		 */
13241 		tw32(offset, read_mask | write_mask);
13242 
13243 		val = tr32(offset);
13244 
13245 		/* Test the read-only bits. */
13246 		if ((val & read_mask) != read_val)
13247 			goto out;
13248 
13249 		/* Test the read/write bits. */
13250 		if ((val & write_mask) != write_mask)
13251 			goto out;
13252 
13253 		tw32(offset, save_val);
13254 	}
13255 
13256 	return 0;
13257 
13258 out:
13259 	if (netif_msg_hw(tp))
13260 		netdev_err(tp->dev,
13261 			   "Register test failed at offset %x\n", offset);
13262 	tw32(offset, save_val);
13263 	return -EIO;
13264 }
13265 
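#if 0
/* Condensed sketch of the mask technique used by tg3_test_registers()
 * above, for a single register: bits in rd_mask are read-only and must
 * survive any write, while bits in wr_mask must accept both all-zeros
 * and all-ones.  tr32()/tw32() are the driver's register accessors.
 */
static int example_reg_probe(struct tg3 *tp, u32 off,
			     u32 rd_mask, u32 wr_mask)
{
	u32 save = tr32(off);
	u32 ro = save & rd_mask;
	u32 val;
	int ok;

	tw32(off, 0);
	val = tr32(off);
	ok = ((val & rd_mask) == ro) && !(val & wr_mask);

	tw32(off, rd_mask | wr_mask);
	val = tr32(off);
	ok = ok && ((val & rd_mask) == ro) &&
	     ((val & wr_mask) == wr_mask);

	tw32(off, save);	/* always restore the original contents */
	return ok ? 0 : -EIO;
}
#endif
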
13266 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13267 {
13268 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13269 	int i;
13270 	u32 j;
13271 
13272 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13273 		for (j = 0; j < len; j += 4) {
13274 			u32 val;
13275 
13276 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13277 			tg3_read_mem(tp, offset + j, &val);
13278 			if (val != test_pattern[i])
13279 				return -EIO;
13280 		}
13281 	}
13282 	return 0;
13283 }
13284 
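/* Note on the patterns above: all-zeros and all-ones exercise stuck-at
 * faults, while the alternating 0xaa55a55a word helps catch shorted or
 * coupled adjacent bit lines.
 */
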
13285 static int tg3_test_memory(struct tg3 *tp)
13286 {
13287 	static struct mem_entry {
13288 		u32 offset;
13289 		u32 len;
13290 	} mem_tbl_570x[] = {
13291 		{ 0x00000000, 0x00b50},
13292 		{ 0x00002000, 0x1c000},
13293 		{ 0xffffffff, 0x00000}
13294 	}, mem_tbl_5705[] = {
13295 		{ 0x00000100, 0x0000c},
13296 		{ 0x00000200, 0x00008},
13297 		{ 0x00004000, 0x00800},
13298 		{ 0x00006000, 0x01000},
13299 		{ 0x00008000, 0x02000},
13300 		{ 0x00010000, 0x0e000},
13301 		{ 0xffffffff, 0x00000}
13302 	}, mem_tbl_5755[] = {
13303 		{ 0x00000200, 0x00008},
13304 		{ 0x00004000, 0x00800},
13305 		{ 0x00006000, 0x00800},
13306 		{ 0x00008000, 0x02000},
13307 		{ 0x00010000, 0x0c000},
13308 		{ 0xffffffff, 0x00000}
13309 	}, mem_tbl_5906[] = {
13310 		{ 0x00000200, 0x00008},
13311 		{ 0x00004000, 0x00400},
13312 		{ 0x00006000, 0x00400},
13313 		{ 0x00008000, 0x01000},
13314 		{ 0x00010000, 0x01000},
13315 		{ 0xffffffff, 0x00000}
13316 	}, mem_tbl_5717[] = {
13317 		{ 0x00000200, 0x00008},
13318 		{ 0x00010000, 0x0a000},
13319 		{ 0x00020000, 0x13c00},
13320 		{ 0xffffffff, 0x00000}
13321 	}, mem_tbl_57765[] = {
13322 		{ 0x00000200, 0x00008},
13323 		{ 0x00004000, 0x00800},
13324 		{ 0x00006000, 0x09800},
13325 		{ 0x00010000, 0x0a000},
13326 		{ 0xffffffff, 0x00000}
13327 	};
13328 	struct mem_entry *mem_tbl;
13329 	int err = 0;
13330 	int i;
13331 
13332 	if (tg3_flag(tp, 5717_PLUS))
13333 		mem_tbl = mem_tbl_5717;
13334 	else if (tg3_flag(tp, 57765_CLASS) ||
13335 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13336 		mem_tbl = mem_tbl_57765;
13337 	else if (tg3_flag(tp, 5755_PLUS))
13338 		mem_tbl = mem_tbl_5755;
13339 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13340 		mem_tbl = mem_tbl_5906;
13341 	else if (tg3_flag(tp, 5705_PLUS))
13342 		mem_tbl = mem_tbl_5705;
13343 	else
13344 		mem_tbl = mem_tbl_570x;
13345 
13346 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13347 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13348 		if (err)
13349 			break;
13350 	}
13351 
13352 	return err;
13353 }
13354 
13355 #define TG3_TSO_MSS		500
13356 
13357 #define TG3_TSO_IP_HDR_LEN	20
13358 #define TG3_TSO_TCP_HDR_LEN	20
13359 #define TG3_TSO_TCP_OPT_LEN	12
13360 
13361 static const u8 tg3_tso_header[] = {
13362 0x08, 0x00,
13363 0x45, 0x00, 0x00, 0x00,
13364 0x00, 0x00, 0x40, 0x00,
13365 0x40, 0x06, 0x00, 0x00,
13366 0x0a, 0x00, 0x00, 0x01,
13367 0x0a, 0x00, 0x00, 0x02,
13368 0x0d, 0x00, 0xe0, 0x00,
13369 0x00, 0x00, 0x01, 0x00,
13370 0x00, 0x00, 0x02, 0x00,
13371 0x80, 0x10, 0x10, 0x00,
13372 0x14, 0x09, 0x00, 0x00,
13373 0x01, 0x01, 0x08, 0x0a,
13374 0x11, 0x11, 0x11, 0x11,
13375 0x11, 0x11, 0x11, 0x11,
13376 };
13377 
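/* Layout of the canned frame above: bytes 0-1 are the IPv4 ethertype
 * (0x0800), the next 20 bytes an IPv4 header (10.0.0.1 -> 10.0.0.2,
 * protocol TCP), followed by a 20-byte TCP header plus a 12-byte
 * timestamp option, matching the TG3_TSO_*_LEN constants above.
 */
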
13378 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13379 {
13380 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13381 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13382 	u32 budget;
13383 	struct sk_buff *skb;
13384 	u8 *tx_data, *rx_data;
13385 	dma_addr_t map;
13386 	int num_pkts, tx_len, rx_len, i, err;
13387 	struct tg3_rx_buffer_desc *desc;
13388 	struct tg3_napi *tnapi, *rnapi;
13389 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13390 
13391 	tnapi = &tp->napi[0];
13392 	rnapi = &tp->napi[0];
13393 	if (tp->irq_cnt > 1) {
13394 		if (tg3_flag(tp, ENABLE_RSS))
13395 			rnapi = &tp->napi[1];
13396 		if (tg3_flag(tp, ENABLE_TSS))
13397 			tnapi = &tp->napi[1];
13398 	}
13399 	coal_now = tnapi->coal_now | rnapi->coal_now;
13400 
13401 	err = -EIO;
13402 
13403 	tx_len = pktsz;
13404 	skb = netdev_alloc_skb(tp->dev, tx_len);
13405 	if (!skb)
13406 		return -ENOMEM;
13407 
13408 	tx_data = skb_put(skb, tx_len);
13409 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13410 	memset(tx_data + ETH_ALEN, 0x0, 8);
13411 
13412 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13413 
13414 	if (tso_loopback) {
13415 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13416 
13417 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13418 			      TG3_TSO_TCP_OPT_LEN;
13419 
13420 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13421 		       sizeof(tg3_tso_header));
13422 		mss = TG3_TSO_MSS;
13423 
13424 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13425 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13426 
13427 		/* Set the total length field in the IP header */
13428 		iph->tot_len = htons((u16)(mss + hdr_len));
13429 
13430 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13431 			      TXD_FLAG_CPU_POST_DMA);
13432 
13433 		if (tg3_flag(tp, HW_TSO_1) ||
13434 		    tg3_flag(tp, HW_TSO_2) ||
13435 		    tg3_flag(tp, HW_TSO_3)) {
13436 			struct tcphdr *th;
13437 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13438 			th = (struct tcphdr *)&tx_data[val];
13439 			th->check = 0;
13440 		} else
13441 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13442 
13443 		if (tg3_flag(tp, HW_TSO_3)) {
13444 			mss |= (hdr_len & 0xc) << 12;
13445 			if (hdr_len & 0x10)
13446 				base_flags |= 0x00000010;
13447 			base_flags |= (hdr_len & 0x3e0) << 5;
13448 		} else if (tg3_flag(tp, HW_TSO_2))
13449 			mss |= hdr_len << 9;
13450 		else if (tg3_flag(tp, HW_TSO_1) ||
13451 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13452 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13453 		} else {
13454 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13455 		}
13456 
13457 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13458 	} else {
13459 		num_pkts = 1;
13460 		data_off = ETH_HLEN;
13461 
13462 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13463 		    tx_len > VLAN_ETH_FRAME_LEN)
13464 			base_flags |= TXD_FLAG_JMB_PKT;
13465 	}
13466 
13467 	for (i = data_off; i < tx_len; i++)
13468 		tx_data[i] = (u8) (i & 0xff);
13469 
13470 	map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13471 	if (dma_mapping_error(&tp->pdev->dev, map)) {
13472 		dev_kfree_skb(skb);
13473 		return -EIO;
13474 	}
13475 
13476 	val = tnapi->tx_prod;
13477 	tnapi->tx_buffers[val].skb = skb;
13478 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13479 
13480 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13481 	       rnapi->coal_now);
13482 
13483 	udelay(10);
13484 
13485 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13486 
13487 	budget = tg3_tx_avail(tnapi);
13488 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13489 			    base_flags | TXD_FLAG_END, mss, 0)) {
13490 		tnapi->tx_buffers[val].skb = NULL;
13491 		dev_kfree_skb(skb);
13492 		return -EIO;
13493 	}
13494 
13495 	tnapi->tx_prod++;
13496 
13497 	/* Sync BD data before updating mailbox */
13498 	wmb();
13499 
13500 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13501 	tr32_mailbox(tnapi->prodmbox);
13502 
13503 	udelay(10);
13504 
13505 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13506 	for (i = 0; i < 35; i++) {
13507 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13508 		       coal_now);
13509 
13510 		udelay(10);
13511 
13512 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13513 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13514 		if ((tx_idx == tnapi->tx_prod) &&
13515 		    (rx_idx == (rx_start_idx + num_pkts)))
13516 			break;
13517 	}
13518 
13519 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13520 	dev_kfree_skb(skb);
13521 
13522 	if (tx_idx != tnapi->tx_prod)
13523 		goto out;
13524 
13525 	if (rx_idx != rx_start_idx + num_pkts)
13526 		goto out;
13527 
13528 	val = data_off;
13529 	while (rx_idx != rx_start_idx) {
13530 		desc = &rnapi->rx_rcb[rx_start_idx++];
13531 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13532 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13533 
13534 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13535 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13536 			goto out;
13537 
13538 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13539 			 - ETH_FCS_LEN;
13540 
13541 		if (!tso_loopback) {
13542 			if (rx_len != tx_len)
13543 				goto out;
13544 
13545 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13546 				if (opaque_key != RXD_OPAQUE_RING_STD)
13547 					goto out;
13548 			} else {
13549 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13550 					goto out;
13551 			}
13552 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13553 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13554 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13555 			goto out;
13556 		}
13557 
13558 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13559 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13560 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13561 					     mapping);
13562 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13563 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13564 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13565 					     mapping);
13566 		} else
13567 			goto out;
13568 
13569 		dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13570 					DMA_FROM_DEVICE);
13571 
13572 		rx_data += TG3_RX_OFFSET(tp);
13573 		for (i = data_off; i < rx_len; i++, val++) {
13574 			if (*(rx_data + i) != (u8) (val & 0xff))
13575 				goto out;
13576 		}
13577 	}
13578 
13579 	err = 0;
13580 
13581 	/* tg3_free_rings will unmap and free the rx_data */
13582 out:
13583 	return err;
13584 }
13585 
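#if 0
/* Sketch of the payload convention used by tg3_run_loopback() above
 * (simplified, single-buffer form): transmit fills each byte with its
 * offset modulo 256, so receive verification just replays the ramp.
 */
static bool example_ramp_ok(const u8 *rx_data, int data_off, int len)
{
	int i;

	for (i = data_off; i < len; i++)
		if (rx_data[i] != (u8)(i & 0xff))
			return false;
	return true;
}
#endif
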
13586 #define TG3_STD_LOOPBACK_FAILED		1
13587 #define TG3_JMB_LOOPBACK_FAILED		2
13588 #define TG3_TSO_LOOPBACK_FAILED		4
13589 #define TG3_LOOPBACK_FAILED \
13590 	(TG3_STD_LOOPBACK_FAILED | \
13591 	 TG3_JMB_LOOPBACK_FAILED | \
13592 	 TG3_TSO_LOOPBACK_FAILED)
13593 
13594 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13595 {
13596 	int err = -EIO;
13597 	u32 eee_cap;
13598 	u32 jmb_pkt_sz = 9000;
13599 
13600 	if (tp->dma_limit)
13601 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13602 
13603 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13604 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13605 
13606 	if (!netif_running(tp->dev)) {
13607 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13608 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13609 		if (do_extlpbk)
13610 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13611 		goto done;
13612 	}
13613 
13614 	err = tg3_reset_hw(tp, true);
13615 	if (err) {
13616 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13617 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13618 		if (do_extlpbk)
13619 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13620 		goto done;
13621 	}
13622 
13623 	if (tg3_flag(tp, ENABLE_RSS)) {
13624 		int i;
13625 
13626 		/* Reroute all rx packets to the 1st queue */
13627 		for (i = MAC_RSS_INDIR_TBL_0;
13628 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13629 			tw32(i, 0x0);
13630 	}
13631 
13632 	/* HW errata - mac loopback fails in some cases on 5780.
13633 	 * Normal traffic and PHY loopback are not affected by
13634 	 * errata.  Also, the MAC loopback test is deprecated for
13635 	 * all newer ASIC revisions.
13636 	 */
13637 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13638 	    !tg3_flag(tp, CPMU_PRESENT)) {
13639 		tg3_mac_loopback(tp, true);
13640 
13641 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13642 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13643 
13644 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13645 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13646 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13647 
13648 		tg3_mac_loopback(tp, false);
13649 	}
13650 
13651 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13652 	    !tg3_flag(tp, USE_PHYLIB)) {
13653 		int i;
13654 
13655 		tg3_phy_lpbk_set(tp, 0, false);
13656 
13657 		/* Wait for link */
13658 		for (i = 0; i < 100; i++) {
13659 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13660 				break;
13661 			mdelay(1);
13662 		}
13663 
13664 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13665 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13666 		if (tg3_flag(tp, TSO_CAPABLE) &&
13667 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13668 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13669 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13670 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13671 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13672 
13673 		if (do_extlpbk) {
13674 			tg3_phy_lpbk_set(tp, 0, true);
13675 
13676 			/* All link indications report up, but the hardware
13677 			 * isn't really ready for about 20 msec.  Double it
13678 			 * to be sure.
13679 			 */
13680 			mdelay(40);
13681 
13682 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13683 				data[TG3_EXT_LOOPB_TEST] |=
13684 							TG3_STD_LOOPBACK_FAILED;
13685 			if (tg3_flag(tp, TSO_CAPABLE) &&
13686 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13687 				data[TG3_EXT_LOOPB_TEST] |=
13688 							TG3_TSO_LOOPBACK_FAILED;
13689 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13690 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13691 				data[TG3_EXT_LOOPB_TEST] |=
13692 							TG3_JMB_LOOPBACK_FAILED;
13693 		}
13694 
13695 		/* Re-enable gphy autopowerdown. */
13696 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13697 			tg3_phy_toggle_apd(tp, true);
13698 	}
13699 
13700 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13701 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13702 
13703 done:
13704 	tp->phy_flags |= eee_cap;
13705 
13706 	return err;
13707 }
13708 
13709 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13710 			  u64 *data)
13711 {
13712 	struct tg3 *tp = netdev_priv(dev);
13713 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13714 
13715 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13716 		if (tg3_power_up(tp)) {
13717 			etest->flags |= ETH_TEST_FL_FAILED;
13718 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13719 			return;
13720 		}
13721 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13722 	}
13723 
13724 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13725 
13726 	if (tg3_test_nvram(tp) != 0) {
13727 		etest->flags |= ETH_TEST_FL_FAILED;
13728 		data[TG3_NVRAM_TEST] = 1;
13729 	}
13730 	if (!doextlpbk && tg3_test_link(tp)) {
13731 		etest->flags |= ETH_TEST_FL_FAILED;
13732 		data[TG3_LINK_TEST] = 1;
13733 	}
13734 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13735 		int err, err2 = 0, irq_sync = 0;
13736 
13737 		if (netif_running(dev)) {
13738 			tg3_phy_stop(tp);
13739 			tg3_netif_stop(tp);
13740 			irq_sync = 1;
13741 		}
13742 
13743 		tg3_full_lock(tp, irq_sync);
13744 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13745 		err = tg3_nvram_lock(tp);
13746 		tg3_halt_cpu(tp, RX_CPU_BASE);
13747 		if (!tg3_flag(tp, 5705_PLUS))
13748 			tg3_halt_cpu(tp, TX_CPU_BASE);
13749 		if (!err)
13750 			tg3_nvram_unlock(tp);
13751 
13752 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13753 			tg3_phy_reset(tp);
13754 
13755 		if (tg3_test_registers(tp) != 0) {
13756 			etest->flags |= ETH_TEST_FL_FAILED;
13757 			data[TG3_REGISTER_TEST] = 1;
13758 		}
13759 
13760 		if (tg3_test_memory(tp) != 0) {
13761 			etest->flags |= ETH_TEST_FL_FAILED;
13762 			data[TG3_MEMORY_TEST] = 1;
13763 		}
13764 
13765 		if (doextlpbk)
13766 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13767 
13768 		if (tg3_test_loopback(tp, data, doextlpbk))
13769 			etest->flags |= ETH_TEST_FL_FAILED;
13770 
13771 		tg3_full_unlock(tp);
13772 
13773 		if (tg3_test_interrupt(tp) != 0) {
13774 			etest->flags |= ETH_TEST_FL_FAILED;
13775 			data[TG3_INTERRUPT_TEST] = 1;
13776 		}
13777 
13778 		tg3_full_lock(tp, 0);
13779 
13780 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13781 		if (netif_running(dev)) {
13782 			tg3_flag_set(tp, INIT_COMPLETE);
13783 			err2 = tg3_restart_hw(tp, true);
13784 			if (!err2)
13785 				tg3_netif_start(tp);
13786 		}
13787 
13788 		tg3_full_unlock(tp);
13789 
13790 		if (irq_sync && !err2)
13791 			tg3_phy_start(tp);
13792 	}
13793 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13794 		tg3_power_down_prepare(tp);
13795 
13796 }
13797 
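/* Usage note: tg3_self_test() above backs "ethtool -t <iface>".  An
 * online run covers only the NVRAM and link tests; "ethtool -t <iface>
 * offline" sets ETH_TEST_FL_OFFLINE and adds the register, memory,
 * loopback and interrupt tests, restarting the device afterwards.
 */
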
13798 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13799 {
13800 	struct tg3 *tp = netdev_priv(dev);
13801 	struct hwtstamp_config stmpconf;
13802 
13803 	if (!tg3_flag(tp, PTP_CAPABLE))
13804 		return -EOPNOTSUPP;
13805 
13806 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13807 		return -EFAULT;
13808 
13809 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13810 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13811 		return -ERANGE;
13812 
13813 	switch (stmpconf.rx_filter) {
13814 	case HWTSTAMP_FILTER_NONE:
13815 		tp->rxptpctl = 0;
13816 		break;
13817 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13818 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13819 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13820 		break;
13821 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13822 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13823 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13824 		break;
13825 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13826 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13827 			       TG3_RX_PTP_CTL_DELAY_REQ;
13828 		break;
13829 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13830 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13831 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13832 		break;
13833 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13834 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13835 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13836 		break;
13837 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13838 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13839 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13840 		break;
13841 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13842 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13843 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13844 		break;
13845 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13846 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13847 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13848 		break;
13849 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13850 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13851 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13852 		break;
13853 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13854 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13855 			       TG3_RX_PTP_CTL_DELAY_REQ;
13856 		break;
13857 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13858 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13859 			       TG3_RX_PTP_CTL_DELAY_REQ;
13860 		break;
13861 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13862 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13863 			       TG3_RX_PTP_CTL_DELAY_REQ;
13864 		break;
13865 	default:
13866 		return -ERANGE;
13867 	}
13868 
13869 	if (netif_running(dev) && tp->rxptpctl)
13870 		tw32(TG3_RX_PTP_CTL,
13871 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13872 
13873 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13874 		tg3_flag_set(tp, TX_TSTAMP_EN);
13875 	else
13876 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13877 
13878 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13879 		-EFAULT : 0;
13880 }
13881 
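#if 0
/* Illustrative userspace sketch, not driver code: requesting TX
 * timestamping and RX timestamps for all PTPv2 event packets, which is
 * serviced by tg3_hwtstamp_set() above via SIOCSHWTSTAMP.  "fd" is any
 * open socket; the interface name is an illustrative assumption.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_ptp_timestamps(int fd, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}
#endif
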
13882 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13883 {
13884 	struct tg3 *tp = netdev_priv(dev);
13885 	struct hwtstamp_config stmpconf;
13886 
13887 	if (!tg3_flag(tp, PTP_CAPABLE))
13888 		return -EOPNOTSUPP;
13889 
13890 	stmpconf.flags = 0;
13891 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13892 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13893 
13894 	switch (tp->rxptpctl) {
13895 	case 0:
13896 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13897 		break;
13898 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13899 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13900 		break;
13901 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13902 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13903 		break;
13904 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13905 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13906 		break;
13907 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13908 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13909 		break;
13910 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13911 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13912 		break;
13913 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13914 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13915 		break;
13916 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13917 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13918 		break;
13919 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13920 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13921 		break;
13922 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13923 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13924 		break;
13925 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13926 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13927 		break;
13928 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13929 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13930 		break;
13931 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13932 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13933 		break;
13934 	default:
13935 		WARN_ON_ONCE(1);
13936 		return -ERANGE;
13937 	}
13938 
13939 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13940 		-EFAULT : 0;
13941 }
13942 
13943 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13944 {
13945 	struct mii_ioctl_data *data = if_mii(ifr);
13946 	struct tg3 *tp = netdev_priv(dev);
13947 	int err;
13948 
13949 	if (tg3_flag(tp, USE_PHYLIB)) {
13950 		struct phy_device *phydev;
13951 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13952 			return -EAGAIN;
13953 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13954 		return phy_mii_ioctl(phydev, ifr, cmd);
13955 	}
13956 
13957 	switch (cmd) {
13958 	case SIOCGMIIPHY:
13959 		data->phy_id = tp->phy_addr;
13960 
13961 		fallthrough;
13962 	case SIOCGMIIREG: {
13963 		u32 mii_regval;
13964 
13965 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13966 			break;			/* We have no PHY */
13967 
13968 		if (!netif_running(dev))
13969 			return -EAGAIN;
13970 
13971 		spin_lock_bh(&tp->lock);
13972 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13973 				    data->reg_num & 0x1f, &mii_regval);
13974 		spin_unlock_bh(&tp->lock);
13975 
13976 		data->val_out = mii_regval;
13977 
13978 		return err;
13979 	}
13980 
13981 	case SIOCSMIIREG:
13982 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13983 			break;			/* We have no PHY */
13984 
13985 		if (!netif_running(dev))
13986 			return -EAGAIN;
13987 
13988 		spin_lock_bh(&tp->lock);
13989 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13990 				     data->reg_num & 0x1f, data->val_in);
13991 		spin_unlock_bh(&tp->lock);
13992 
13993 		return err;
13994 
13995 	case SIOCSHWTSTAMP:
13996 		return tg3_hwtstamp_set(dev, ifr);
13997 
13998 	case SIOCGHWTSTAMP:
13999 		return tg3_hwtstamp_get(dev, ifr);
14000 
14001 	default:
14002 		/* do nothing */
14003 		break;
14004 	}
14005 	return -EOPNOTSUPP;
14006 }
14007 
14008 static int tg3_get_coalesce(struct net_device *dev,
14009 			    struct ethtool_coalesce *ec,
14010 			    struct kernel_ethtool_coalesce *kernel_coal,
14011 			    struct netlink_ext_ack *extack)
14012 {
14013 	struct tg3 *tp = netdev_priv(dev);
14014 
14015 	memcpy(ec, &tp->coal, sizeof(*ec));
14016 	return 0;
14017 }
14018 
14019 static int tg3_set_coalesce(struct net_device *dev,
14020 			    struct ethtool_coalesce *ec,
14021 			    struct kernel_ethtool_coalesce *kernel_coal,
14022 			    struct netlink_ext_ack *extack)
14023 {
14024 	struct tg3 *tp = netdev_priv(dev);
14025 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14026 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14027 
14028 	if (!tg3_flag(tp, 5705_PLUS)) {
14029 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14030 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14031 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14032 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14033 	}
14034 
14035 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14036 	    (!ec->rx_coalesce_usecs) ||
14037 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14038 	    (!ec->tx_coalesce_usecs) ||
14039 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14040 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14041 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14042 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14043 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14044 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14045 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14046 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14047 		return -EINVAL;
14048 
14049 	/* Only copy relevant parameters, ignore all others. */
14050 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14051 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14052 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14053 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14054 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14055 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14056 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14057 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14058 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14059 
14060 	if (netif_running(dev)) {
14061 		tg3_full_lock(tp, 0);
14062 		__tg3_set_coalesce(tp, &tp->coal);
14063 		tg3_full_unlock(tp);
14064 	}
14065 	return 0;
14066 }
14067 
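/* Usage note: tg3_get_coalesce()/tg3_set_coalesce() above back
 * "ethtool -c/-C <iface>"; e.g. "ethtool -C eth0 rx-usecs 20" sets
 * ec->rx_coalesce_usecs.  Zero rx/tx usec values are rejected by the
 * validation above.
 */
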
14068 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14069 {
14070 	struct tg3 *tp = netdev_priv(dev);
14071 
14072 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14073 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14074 		return -EOPNOTSUPP;
14075 	}
14076 
14077 	if (edata->advertised != tp->eee.advertised) {
14078 		netdev_warn(tp->dev,
14079 			    "Direct manipulation of EEE advertisement is not supported\n");
14080 		return -EINVAL;
14081 	}
14082 
14083 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14084 		netdev_warn(tp->dev,
14085 			    "Maximal Tx Lpi timer supported is %#x(u)\n",
14086 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14087 		return -EINVAL;
14088 	}
14089 
14090 	tp->eee = *edata;
14091 
14092 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14093 	tg3_warn_mgmt_link_flap(tp);
14094 
14095 	if (netif_running(tp->dev)) {
14096 		tg3_full_lock(tp, 0);
14097 		tg3_setup_eee(tp);
14098 		tg3_phy_reset(tp);
14099 		tg3_full_unlock(tp);
14100 	}
14101 
14102 	return 0;
14103 }
14104 
14105 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14106 {
14107 	struct tg3 *tp = netdev_priv(dev);
14108 
14109 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14110 		netdev_warn(tp->dev,
14111 			    "Board does not support EEE!\n");
14112 		return -EOPNOTSUPP;
14113 	}
14114 
14115 	*edata = tp->eee;
14116 	return 0;
14117 }
14118 
14119 static const struct ethtool_ops tg3_ethtool_ops = {
14120 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14121 				     ETHTOOL_COALESCE_MAX_FRAMES |
14122 				     ETHTOOL_COALESCE_USECS_IRQ |
14123 				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14124 				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14125 	.get_drvinfo		= tg3_get_drvinfo,
14126 	.get_regs_len		= tg3_get_regs_len,
14127 	.get_regs		= tg3_get_regs,
14128 	.get_wol		= tg3_get_wol,
14129 	.set_wol		= tg3_set_wol,
14130 	.get_msglevel		= tg3_get_msglevel,
14131 	.set_msglevel		= tg3_set_msglevel,
14132 	.nway_reset		= tg3_nway_reset,
14133 	.get_link		= ethtool_op_get_link,
14134 	.get_eeprom_len		= tg3_get_eeprom_len,
14135 	.get_eeprom		= tg3_get_eeprom,
14136 	.set_eeprom		= tg3_set_eeprom,
14137 	.get_ringparam		= tg3_get_ringparam,
14138 	.set_ringparam		= tg3_set_ringparam,
14139 	.get_pauseparam		= tg3_get_pauseparam,
14140 	.set_pauseparam		= tg3_set_pauseparam,
14141 	.self_test		= tg3_self_test,
14142 	.get_strings		= tg3_get_strings,
14143 	.set_phys_id		= tg3_set_phys_id,
14144 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14145 	.get_coalesce		= tg3_get_coalesce,
14146 	.set_coalesce		= tg3_set_coalesce,
14147 	.get_sset_count		= tg3_get_sset_count,
14148 	.get_rxnfc		= tg3_get_rxnfc,
14149 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14150 	.get_rxfh		= tg3_get_rxfh,
14151 	.set_rxfh		= tg3_set_rxfh,
14152 	.get_channels		= tg3_get_channels,
14153 	.set_channels		= tg3_set_channels,
14154 	.get_ts_info		= tg3_get_ts_info,
14155 	.get_eee		= tg3_get_eee,
14156 	.set_eee		= tg3_set_eee,
14157 	.get_link_ksettings	= tg3_get_link_ksettings,
14158 	.set_link_ksettings	= tg3_set_link_ksettings,
14159 };
14160 
14161 static void tg3_get_stats64(struct net_device *dev,
14162 			    struct rtnl_link_stats64 *stats)
14163 {
14164 	struct tg3 *tp = netdev_priv(dev);
14165 
14166 	spin_lock_bh(&tp->lock);
14167 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14168 		*stats = tp->net_stats_prev;
14169 		spin_unlock_bh(&tp->lock);
14170 		return;
14171 	}
14172 
14173 	tg3_get_nstats(tp, stats);
14174 	spin_unlock_bh(&tp->lock);
14175 }
14176 
14177 static void tg3_set_rx_mode(struct net_device *dev)
14178 {
14179 	struct tg3 *tp = netdev_priv(dev);
14180 
14181 	if (!netif_running(dev))
14182 		return;
14183 
14184 	tg3_full_lock(tp, 0);
14185 	__tg3_set_rx_mode(dev);
14186 	tg3_full_unlock(tp);
14187 }
14188 
14189 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14190 			       int new_mtu)
14191 {
14192 	dev->mtu = new_mtu;
14193 
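	/* The driver treats TSO capability and jumbo MTUs as mutually
	 * exclusive on 5780-class chips: an MTU above ETH_DATA_LEN
	 * (e.g. new_mtu = 9000) toggles TSO_CAPABLE around
	 * netdev_update_features() there, while all other chips simply
	 * turn the jumbo RX ring on or off.
	 */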
14194 	if (new_mtu > ETH_DATA_LEN) {
14195 		if (tg3_flag(tp, 5780_CLASS)) {
14196 			netdev_update_features(dev);
14197 			tg3_flag_clear(tp, TSO_CAPABLE);
14198 		} else {
14199 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14200 		}
14201 	} else {
14202 		if (tg3_flag(tp, 5780_CLASS)) {
14203 			tg3_flag_set(tp, TSO_CAPABLE);
14204 			netdev_update_features(dev);
14205 		}
14206 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14207 	}
14208 }
14209 
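/* ndo_change_mtu hook, reached via dev_set_mtu() -- e.g. from an
 * (illustrative) "ip link set dev eth0 mtu 9000"; the core has already
 * range-checked new_mtu against dev->min_mtu/max_mtu by this point.
 */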
14210 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14211 {
14212 	struct tg3 *tp = netdev_priv(dev);
14213 	int err;
14214 	bool reset_phy = false;
14215 
14216 	if (!netif_running(dev)) {
14217 		/* We'll just catch it later when the
14218 		 * device is brought up.
14219 		 */
14220 		tg3_set_mtu(dev, tp, new_mtu);
14221 		return 0;
14222 	}
14223 
14224 	tg3_phy_stop(tp);
14225 
14226 	tg3_netif_stop(tp);
14227 
14228 	tg3_set_mtu(dev, tp, new_mtu);
14229 
14230 	tg3_full_lock(tp, 1);
14231 
14232 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14233 
14234 	/* Reset the PHY, otherwise the read DMA engine will be left in a
14235 	 * mode that limits all requests to 256 bytes.
14236 	 */
14237 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14238 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14239 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14240 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14241 		reset_phy = true;
14242 
14243 	err = tg3_restart_hw(tp, reset_phy);
14244 
14245 	if (!err)
14246 		tg3_netif_start(tp);
14247 
14248 	tg3_full_unlock(tp);
14249 
14250 	if (!err)
14251 		tg3_phy_start(tp);
14252 
14253 	return err;
14254 }
14255 
14256 static const struct net_device_ops tg3_netdev_ops = {
14257 	.ndo_open		= tg3_open,
14258 	.ndo_stop		= tg3_close,
14259 	.ndo_start_xmit		= tg3_start_xmit,
14260 	.ndo_get_stats64	= tg3_get_stats64,
14261 	.ndo_validate_addr	= eth_validate_addr,
14262 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14263 	.ndo_set_mac_address	= tg3_set_mac_addr,
14264 	.ndo_eth_ioctl		= tg3_ioctl,
14265 	.ndo_tx_timeout		= tg3_tx_timeout,
14266 	.ndo_change_mtu		= tg3_change_mtu,
14267 	.ndo_fix_features	= tg3_fix_features,
14268 	.ndo_set_features	= tg3_set_features,
14269 #ifdef CONFIG_NET_POLL_CONTROLLER
14270 	.ndo_poll_controller	= tg3_poll_controller,
14271 #endif
14272 };
14273 
14274 static void tg3_get_eeprom_size(struct tg3 *tp)
14275 {
14276 	u32 cursize, val, magic;
14277 
14278 	tp->nvram_size = EEPROM_CHIP_SIZE;
14279 
14280 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14281 		return;
14282 
14283 	if ((magic != TG3_EEPROM_MAGIC) &&
14284 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14285 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14286 		return;
14287 
14288 	/*
14289 	 * Size the chip by reading offsets at increasing powers of two.
14290 	 * When we encounter our validation signature, we know the addressing
14291 	 * has wrapped around, and thus have our chip size.
14292 	 */
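	/* E.g. (illustrative): on a 32KB part the probes at 0x10, 0x20, ...
	 * return ordinary data, but the probe at cursize = 0x8000 wraps
	 * around to offset 0 and reads back the magic value, so we stop
	 * with nvram_size = 0x8000.
	 */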
14293 	cursize = 0x10;
14294 
14295 	while (cursize < tp->nvram_size) {
14296 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14297 			return;
14298 
14299 		if (val == magic)
14300 			break;
14301 
14302 		cursize <<= 1;
14303 	}
14304 
14305 	tp->nvram_size = cursize;
14306 }
14307 
14308 static void tg3_get_nvram_size(struct tg3 *tp)
14309 {
14310 	u32 val;
14311 
14312 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14313 		return;
14314 
14315 	/* Selfboot format */
14316 	if (val != TG3_EEPROM_MAGIC) {
14317 		tg3_get_eeprom_size(tp);
14318 		return;
14319 	}
14320 
14321 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14322 		if (val != 0) {
14323 			/* This is confusing.  We want to operate on the
14324 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14325 			 * call will read from NVRAM and byteswap the data
14326 			 * according to the byteswapping settings for all
14327 			 * other register accesses.  This ensures the data we
14328 			 * want will always reside in the lower 16-bits.
14329 			 * However, the data in NVRAM is in LE format, which
14330 			 * means the data from the NVRAM read will always be
14331 			 * opposite the endianness of the CPU.  The 16-bit
14332 			 * byteswap then brings the data to CPU endianness.
14333 			 */
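			/* E.g. (illustrative): a size word of 0x0200
			 * (512, in KB units) stored LE reads back as
			 * 0x0002 in the low half; swab16() restores
			 * 0x0200, giving nvram_size = 512 * 1024.
			 */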
14334 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14335 			return;
14336 		}
14337 	}
14338 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14339 }
14340 
14341 static void tg3_get_nvram_info(struct tg3 *tp)
14342 {
14343 	u32 nvcfg1;
14344 
14345 	nvcfg1 = tr32(NVRAM_CFG1);
14346 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14347 		tg3_flag_set(tp, FLASH);
14348 	} else {
14349 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14350 		tw32(NVRAM_CFG1, nvcfg1);
14351 	}
14352 
14353 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14354 	    tg3_flag(tp, 5780_CLASS)) {
14355 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14356 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14357 			tp->nvram_jedecnum = JEDEC_ATMEL;
14358 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14359 			tg3_flag_set(tp, NVRAM_BUFFERED);
14360 			break;
14361 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14362 			tp->nvram_jedecnum = JEDEC_ATMEL;
14363 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14364 			break;
14365 		case FLASH_VENDOR_ATMEL_EEPROM:
14366 			tp->nvram_jedecnum = JEDEC_ATMEL;
14367 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14368 			tg3_flag_set(tp, NVRAM_BUFFERED);
14369 			break;
14370 		case FLASH_VENDOR_ST:
14371 			tp->nvram_jedecnum = JEDEC_ST;
14372 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14373 			tg3_flag_set(tp, NVRAM_BUFFERED);
14374 			break;
14375 		case FLASH_VENDOR_SAIFUN:
14376 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14377 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14378 			break;
14379 		case FLASH_VENDOR_SST_SMALL:
14380 		case FLASH_VENDOR_SST_LARGE:
14381 			tp->nvram_jedecnum = JEDEC_SST;
14382 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14383 			break;
14384 		}
14385 	} else {
14386 		tp->nvram_jedecnum = JEDEC_ATMEL;
14387 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14388 		tg3_flag_set(tp, NVRAM_BUFFERED);
14389 	}
14390 }
14391 
14392 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14393 {
14394 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14395 	case FLASH_5752PAGE_SIZE_256:
14396 		tp->nvram_pagesize = 256;
14397 		break;
14398 	case FLASH_5752PAGE_SIZE_512:
14399 		tp->nvram_pagesize = 512;
14400 		break;
14401 	case FLASH_5752PAGE_SIZE_1K:
14402 		tp->nvram_pagesize = 1024;
14403 		break;
14404 	case FLASH_5752PAGE_SIZE_2K:
14405 		tp->nvram_pagesize = 2048;
14406 		break;
14407 	case FLASH_5752PAGE_SIZE_4K:
14408 		tp->nvram_pagesize = 4096;
14409 		break;
14410 	case FLASH_5752PAGE_SIZE_264:
14411 		tp->nvram_pagesize = 264;
14412 		break;
14413 	case FLASH_5752PAGE_SIZE_528:
14414 		tp->nvram_pagesize = 528;
14415 		break;
14416 	}
14417 }
14418 
14419 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14420 {
14421 	u32 nvcfg1;
14422 
14423 	nvcfg1 = tr32(NVRAM_CFG1);
14424 
14425 	/* NVRAM protection for TPM */
14426 	if (nvcfg1 & (1 << 27))
14427 		tg3_flag_set(tp, PROTECTED_NVRAM);
14428 
14429 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14430 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14431 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14432 		tp->nvram_jedecnum = JEDEC_ATMEL;
14433 		tg3_flag_set(tp, NVRAM_BUFFERED);
14434 		break;
14435 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14436 		tp->nvram_jedecnum = JEDEC_ATMEL;
14437 		tg3_flag_set(tp, NVRAM_BUFFERED);
14438 		tg3_flag_set(tp, FLASH);
14439 		break;
14440 	case FLASH_5752VENDOR_ST_M45PE10:
14441 	case FLASH_5752VENDOR_ST_M45PE20:
14442 	case FLASH_5752VENDOR_ST_M45PE40:
14443 		tp->nvram_jedecnum = JEDEC_ST;
14444 		tg3_flag_set(tp, NVRAM_BUFFERED);
14445 		tg3_flag_set(tp, FLASH);
14446 		break;
14447 	}
14448 
14449 	if (tg3_flag(tp, FLASH)) {
14450 		tg3_nvram_get_pagesize(tp, nvcfg1);
14451 	} else {
14452 		/* For eeprom, set pagesize to maximum eeprom size */
14453 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14454 
14455 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14456 		tw32(NVRAM_CFG1, nvcfg1);
14457 	}
14458 }
14459 
14460 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14461 {
14462 	u32 nvcfg1, protect = 0;
14463 
14464 	nvcfg1 = tr32(NVRAM_CFG1);
14465 
14466 	/* NVRAM protection for TPM */
14467 	if (nvcfg1 & (1 << 27)) {
14468 		tg3_flag_set(tp, PROTECTED_NVRAM);
14469 		protect = 1;
14470 	}
14471 
14472 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14473 	switch (nvcfg1) {
14474 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14475 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14476 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14477 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14478 		tp->nvram_jedecnum = JEDEC_ATMEL;
14479 		tg3_flag_set(tp, NVRAM_BUFFERED);
14480 		tg3_flag_set(tp, FLASH);
14481 		tp->nvram_pagesize = 264;
14482 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14483 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14484 			tp->nvram_size = (protect ? 0x3e200 :
14485 					  TG3_NVRAM_SIZE_512KB);
14486 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14487 			tp->nvram_size = (protect ? 0x1f200 :
14488 					  TG3_NVRAM_SIZE_256KB);
14489 		else
14490 			tp->nvram_size = (protect ? 0x1f200 :
14491 					  TG3_NVRAM_SIZE_128KB);
14492 		break;
14493 	case FLASH_5752VENDOR_ST_M45PE10:
14494 	case FLASH_5752VENDOR_ST_M45PE20:
14495 	case FLASH_5752VENDOR_ST_M45PE40:
14496 		tp->nvram_jedecnum = JEDEC_ST;
14497 		tg3_flag_set(tp, NVRAM_BUFFERED);
14498 		tg3_flag_set(tp, FLASH);
14499 		tp->nvram_pagesize = 256;
14500 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14501 			tp->nvram_size = (protect ?
14502 					  TG3_NVRAM_SIZE_64KB :
14503 					  TG3_NVRAM_SIZE_128KB);
14504 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14505 			tp->nvram_size = (protect ?
14506 					  TG3_NVRAM_SIZE_64KB :
14507 					  TG3_NVRAM_SIZE_256KB);
14508 		else
14509 			tp->nvram_size = (protect ?
14510 					  TG3_NVRAM_SIZE_128KB :
14511 					  TG3_NVRAM_SIZE_512KB);
14512 		break;
14513 	}
14514 }
14515 
14516 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14517 {
14518 	u32 nvcfg1;
14519 
14520 	nvcfg1 = tr32(NVRAM_CFG1);
14521 
14522 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14523 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14524 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14525 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14526 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14527 		tp->nvram_jedecnum = JEDEC_ATMEL;
14528 		tg3_flag_set(tp, NVRAM_BUFFERED);
14529 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14530 
14531 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14532 		tw32(NVRAM_CFG1, nvcfg1);
14533 		break;
14534 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14535 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14536 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14537 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14538 		tp->nvram_jedecnum = JEDEC_ATMEL;
14539 		tg3_flag_set(tp, NVRAM_BUFFERED);
14540 		tg3_flag_set(tp, FLASH);
14541 		tp->nvram_pagesize = 264;
14542 		break;
14543 	case FLASH_5752VENDOR_ST_M45PE10:
14544 	case FLASH_5752VENDOR_ST_M45PE20:
14545 	case FLASH_5752VENDOR_ST_M45PE40:
14546 		tp->nvram_jedecnum = JEDEC_ST;
14547 		tg3_flag_set(tp, NVRAM_BUFFERED);
14548 		tg3_flag_set(tp, FLASH);
14549 		tp->nvram_pagesize = 256;
14550 		break;
14551 	}
14552 }
14553 
14554 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14555 {
14556 	u32 nvcfg1, protect = 0;
14557 
14558 	nvcfg1 = tr32(NVRAM_CFG1);
14559 
14560 	/* NVRAM protection for TPM */
14561 	if (nvcfg1 & (1 << 27)) {
14562 		tg3_flag_set(tp, PROTECTED_NVRAM);
14563 		protect = 1;
14564 	}
14565 
14566 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14567 	switch (nvcfg1) {
14568 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14569 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14570 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14571 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14572 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14573 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14574 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14575 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14576 		tp->nvram_jedecnum = JEDEC_ATMEL;
14577 		tg3_flag_set(tp, NVRAM_BUFFERED);
14578 		tg3_flag_set(tp, FLASH);
14579 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14580 		tp->nvram_pagesize = 256;
14581 		break;
14582 	case FLASH_5761VENDOR_ST_A_M45PE20:
14583 	case FLASH_5761VENDOR_ST_A_M45PE40:
14584 	case FLASH_5761VENDOR_ST_A_M45PE80:
14585 	case FLASH_5761VENDOR_ST_A_M45PE16:
14586 	case FLASH_5761VENDOR_ST_M_M45PE20:
14587 	case FLASH_5761VENDOR_ST_M_M45PE40:
14588 	case FLASH_5761VENDOR_ST_M_M45PE80:
14589 	case FLASH_5761VENDOR_ST_M_M45PE16:
14590 		tp->nvram_jedecnum = JEDEC_ST;
14591 		tg3_flag_set(tp, NVRAM_BUFFERED);
14592 		tg3_flag_set(tp, FLASH);
14593 		tp->nvram_pagesize = 256;
14594 		break;
14595 	}
14596 
14597 	if (protect) {
14598 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14599 	} else {
14600 		switch (nvcfg1) {
14601 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14602 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14603 		case FLASH_5761VENDOR_ST_A_M45PE16:
14604 		case FLASH_5761VENDOR_ST_M_M45PE16:
14605 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14606 			break;
14607 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14608 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14609 		case FLASH_5761VENDOR_ST_A_M45PE80:
14610 		case FLASH_5761VENDOR_ST_M_M45PE80:
14611 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14612 			break;
14613 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14614 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14615 		case FLASH_5761VENDOR_ST_A_M45PE40:
14616 		case FLASH_5761VENDOR_ST_M_M45PE40:
14617 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14618 			break;
14619 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14620 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14621 		case FLASH_5761VENDOR_ST_A_M45PE20:
14622 		case FLASH_5761VENDOR_ST_M_M45PE20:
14623 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14624 			break;
14625 		}
14626 	}
14627 }
14628 
14629 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14630 {
14631 	tp->nvram_jedecnum = JEDEC_ATMEL;
14632 	tg3_flag_set(tp, NVRAM_BUFFERED);
14633 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14634 }
14635 
14636 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14637 {
14638 	u32 nvcfg1;
14639 
14640 	nvcfg1 = tr32(NVRAM_CFG1);
14641 
14642 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14643 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14644 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14645 		tp->nvram_jedecnum = JEDEC_ATMEL;
14646 		tg3_flag_set(tp, NVRAM_BUFFERED);
14647 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14648 
14649 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14650 		tw32(NVRAM_CFG1, nvcfg1);
14651 		return;
14652 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14653 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14654 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14655 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14656 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14657 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14658 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14659 		tp->nvram_jedecnum = JEDEC_ATMEL;
14660 		tg3_flag_set(tp, NVRAM_BUFFERED);
14661 		tg3_flag_set(tp, FLASH);
14662 
14663 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14664 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14665 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14666 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14667 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14668 			break;
14669 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14670 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14671 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14672 			break;
14673 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14674 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14675 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14676 			break;
14677 		}
14678 		break;
14679 	case FLASH_5752VENDOR_ST_M45PE10:
14680 	case FLASH_5752VENDOR_ST_M45PE20:
14681 	case FLASH_5752VENDOR_ST_M45PE40:
14682 		tp->nvram_jedecnum = JEDEC_ST;
14683 		tg3_flag_set(tp, NVRAM_BUFFERED);
14684 		tg3_flag_set(tp, FLASH);
14685 
14686 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14687 		case FLASH_5752VENDOR_ST_M45PE10:
14688 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14689 			break;
14690 		case FLASH_5752VENDOR_ST_M45PE20:
14691 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14692 			break;
14693 		case FLASH_5752VENDOR_ST_M45PE40:
14694 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14695 			break;
14696 		}
14697 		break;
14698 	default:
14699 		tg3_flag_set(tp, NO_NVRAM);
14700 		return;
14701 	}
14702 
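	/* Page sizes of 264 and 528 indicate Atmel AT45DB-style DataFlash,
	 * which is addressed page-by-page rather than linearly; any other
	 * size gets NO_NVRAM_ADDR_TRANS and flat addressing.  Illustration
	 * (assuming the 264-byte "page << 9 | byte" layout): linear offset
	 * 1000 maps to page 3 (1000 / 264), byte 208 (1000 % 264), i.e.
	 * (3 << 9) | 208.
	 */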
14703 	tg3_nvram_get_pagesize(tp, nvcfg1);
14704 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14705 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14706 }
14707 
14708 
14709 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14710 {
14711 	u32 nvcfg1;
14712 
14713 	nvcfg1 = tr32(NVRAM_CFG1);
14714 
14715 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14716 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14717 	case FLASH_5717VENDOR_MICRO_EEPROM:
14718 		tp->nvram_jedecnum = JEDEC_ATMEL;
14719 		tg3_flag_set(tp, NVRAM_BUFFERED);
14720 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14721 
14722 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14723 		tw32(NVRAM_CFG1, nvcfg1);
14724 		return;
14725 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14726 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14727 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14728 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14729 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14730 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14731 	case FLASH_5717VENDOR_ATMEL_45USPT:
14732 		tp->nvram_jedecnum = JEDEC_ATMEL;
14733 		tg3_flag_set(tp, NVRAM_BUFFERED);
14734 		tg3_flag_set(tp, FLASH);
14735 
14736 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14737 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14738 			/* Detect size with tg3_get_nvram_size() */
14739 			break;
14740 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14741 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14742 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14743 			break;
14744 		default:
14745 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14746 			break;
14747 		}
14748 		break;
14749 	case FLASH_5717VENDOR_ST_M_M25PE10:
14750 	case FLASH_5717VENDOR_ST_A_M25PE10:
14751 	case FLASH_5717VENDOR_ST_M_M45PE10:
14752 	case FLASH_5717VENDOR_ST_A_M45PE10:
14753 	case FLASH_5717VENDOR_ST_M_M25PE20:
14754 	case FLASH_5717VENDOR_ST_A_M25PE20:
14755 	case FLASH_5717VENDOR_ST_M_M45PE20:
14756 	case FLASH_5717VENDOR_ST_A_M45PE20:
14757 	case FLASH_5717VENDOR_ST_25USPT:
14758 	case FLASH_5717VENDOR_ST_45USPT:
14759 		tp->nvram_jedecnum = JEDEC_ST;
14760 		tg3_flag_set(tp, NVRAM_BUFFERED);
14761 		tg3_flag_set(tp, FLASH);
14762 
14763 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14764 		case FLASH_5717VENDOR_ST_M_M25PE20:
14765 		case FLASH_5717VENDOR_ST_M_M45PE20:
14766 			/* Detect size with tg3_get_nvram_size() */
14767 			break;
14768 		case FLASH_5717VENDOR_ST_A_M25PE20:
14769 		case FLASH_5717VENDOR_ST_A_M45PE20:
14770 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14771 			break;
14772 		default:
14773 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14774 			break;
14775 		}
14776 		break;
14777 	default:
14778 		tg3_flag_set(tp, NO_NVRAM);
14779 		return;
14780 	}
14781 
14782 	tg3_nvram_get_pagesize(tp, nvcfg1);
14783 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14784 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14785 }
14786 
14787 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14788 {
14789 	u32 nvcfg1, nvmpinstrp, nv_status;
14790 
14791 	nvcfg1 = tr32(NVRAM_CFG1);
14792 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14793 
14794 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14795 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14796 			tg3_flag_set(tp, NO_NVRAM);
14797 			return;
14798 		}
14799 
14800 		switch (nvmpinstrp) {
14801 		case FLASH_5762_MX25L_100:
14802 		case FLASH_5762_MX25L_200:
14803 		case FLASH_5762_MX25L_400:
14804 		case FLASH_5762_MX25L_800:
14805 		case FLASH_5762_MX25L_160_320:
14806 			tp->nvram_pagesize = 4096;
14807 			tp->nvram_jedecnum = JEDEC_MACRONIX;
14808 			tg3_flag_set(tp, NVRAM_BUFFERED);
14809 			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14810 			tg3_flag_set(tp, FLASH);
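			/* The autosense status encodes the flash device ID
			 * as a power-of-two size; assuming
			 * AUTOSENSE_SIZE_IN_MB is the megabyte shift, a
			 * device ID of n reads out as 2^n MB (illustrative
			 * interpretation).
			 */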
14811 			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14812 			tp->nvram_size =
14813 				(1 << (nv_status >> AUTOSENSE_DEVID &
14814 						AUTOSENSE_DEVID_MASK)
14815 					<< AUTOSENSE_SIZE_IN_MB);
14816 			return;
14817 
14818 		case FLASH_5762_EEPROM_HD:
14819 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14820 			break;
14821 		case FLASH_5762_EEPROM_LD:
14822 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14823 			break;
14824 		case FLASH_5720VENDOR_M_ST_M45PE20:
14825 			/* This pinstrap supports multiple sizes, so force it
14826 			 * to read the actual size from location 0xf0.
14827 			 */
14828 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14829 			break;
14830 		}
14831 	}
14832 
14833 	switch (nvmpinstrp) {
14834 	case FLASH_5720_EEPROM_HD:
14835 	case FLASH_5720_EEPROM_LD:
14836 		tp->nvram_jedecnum = JEDEC_ATMEL;
14837 		tg3_flag_set(tp, NVRAM_BUFFERED);
14838 
14839 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14840 		tw32(NVRAM_CFG1, nvcfg1);
14841 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14842 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14843 		else
14844 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14845 		return;
14846 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14847 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14848 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14849 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14850 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14851 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14852 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14853 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14854 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14855 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14856 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14857 	case FLASH_5720VENDOR_ATMEL_45USPT:
14858 		tp->nvram_jedecnum = JEDEC_ATMEL;
14859 		tg3_flag_set(tp, NVRAM_BUFFERED);
14860 		tg3_flag_set(tp, FLASH);
14861 
14862 		switch (nvmpinstrp) {
14863 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14864 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14865 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14866 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14867 			break;
14868 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14869 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14870 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14871 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14872 			break;
14873 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14874 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14875 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14876 			break;
14877 		default:
14878 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14879 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14880 			break;
14881 		}
14882 		break;
14883 	case FLASH_5720VENDOR_M_ST_M25PE10:
14884 	case FLASH_5720VENDOR_M_ST_M45PE10:
14885 	case FLASH_5720VENDOR_A_ST_M25PE10:
14886 	case FLASH_5720VENDOR_A_ST_M45PE10:
14887 	case FLASH_5720VENDOR_M_ST_M25PE20:
14888 	case FLASH_5720VENDOR_M_ST_M45PE20:
14889 	case FLASH_5720VENDOR_A_ST_M25PE20:
14890 	case FLASH_5720VENDOR_A_ST_M45PE20:
14891 	case FLASH_5720VENDOR_M_ST_M25PE40:
14892 	case FLASH_5720VENDOR_M_ST_M45PE40:
14893 	case FLASH_5720VENDOR_A_ST_M25PE40:
14894 	case FLASH_5720VENDOR_A_ST_M45PE40:
14895 	case FLASH_5720VENDOR_M_ST_M25PE80:
14896 	case FLASH_5720VENDOR_M_ST_M45PE80:
14897 	case FLASH_5720VENDOR_A_ST_M25PE80:
14898 	case FLASH_5720VENDOR_A_ST_M45PE80:
14899 	case FLASH_5720VENDOR_ST_25USPT:
14900 	case FLASH_5720VENDOR_ST_45USPT:
14901 		tp->nvram_jedecnum = JEDEC_ST;
14902 		tg3_flag_set(tp, NVRAM_BUFFERED);
14903 		tg3_flag_set(tp, FLASH);
14904 
14905 		switch (nvmpinstrp) {
14906 		case FLASH_5720VENDOR_M_ST_M25PE20:
14907 		case FLASH_5720VENDOR_M_ST_M45PE20:
14908 		case FLASH_5720VENDOR_A_ST_M25PE20:
14909 		case FLASH_5720VENDOR_A_ST_M45PE20:
14910 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14911 			break;
14912 		case FLASH_5720VENDOR_M_ST_M25PE40:
14913 		case FLASH_5720VENDOR_M_ST_M45PE40:
14914 		case FLASH_5720VENDOR_A_ST_M25PE40:
14915 		case FLASH_5720VENDOR_A_ST_M45PE40:
14916 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14917 			break;
14918 		case FLASH_5720VENDOR_M_ST_M25PE80:
14919 		case FLASH_5720VENDOR_M_ST_M45PE80:
14920 		case FLASH_5720VENDOR_A_ST_M25PE80:
14921 		case FLASH_5720VENDOR_A_ST_M45PE80:
14922 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14923 			break;
14924 		default:
14925 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14926 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14927 			break;
14928 		}
14929 		break;
14930 	default:
14931 		tg3_flag_set(tp, NO_NVRAM);
14932 		return;
14933 	}
14934 
14935 	tg3_nvram_get_pagesize(tp, nvcfg1);
14936 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14937 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14938 
14939 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14940 		u32 val;
14941 
14942 		if (tg3_nvram_read(tp, 0, &val))
14943 			return;
14944 
14945 		if (val != TG3_EEPROM_MAGIC &&
14946 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14947 			tg3_flag_set(tp, NO_NVRAM);
14948 	}
14949 }
14950 
14951 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14952 static void tg3_nvram_init(struct tg3 *tp)
14953 {
14954 	if (tg3_flag(tp, IS_SSB_CORE)) {
14955 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14956 		tg3_flag_clear(tp, NVRAM);
14957 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14958 		tg3_flag_set(tp, NO_NVRAM);
14959 		return;
14960 	}
14961 
14962 	tw32_f(GRC_EEPROM_ADDR,
14963 	     (EEPROM_ADDR_FSM_RESET |
14964 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14965 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14966 
14967 	msleep(1);
14968 
14969 	/* Enable seeprom accesses. */
14970 	tw32_f(GRC_LOCAL_CTRL,
14971 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14972 	udelay(100);
14973 
14974 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14975 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
14976 		tg3_flag_set(tp, NVRAM);
14977 
14978 		if (tg3_nvram_lock(tp)) {
14979 			netdev_warn(tp->dev,
14980 				    "Cannot get nvram lock, %s failed\n",
14981 				    __func__);
14982 			return;
14983 		}
14984 		tg3_enable_nvram_access(tp);
14985 
14986 		tp->nvram_size = 0;
14987 
14988 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
14989 			tg3_get_5752_nvram_info(tp);
14990 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14991 			tg3_get_5755_nvram_info(tp);
14992 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14993 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14994 			 tg3_asic_rev(tp) == ASIC_REV_5785)
14995 			tg3_get_5787_nvram_info(tp);
14996 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14997 			tg3_get_5761_nvram_info(tp);
14998 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14999 			tg3_get_5906_nvram_info(tp);
15000 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15001 			 tg3_flag(tp, 57765_CLASS))
15002 			tg3_get_57780_nvram_info(tp);
15003 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15004 			 tg3_asic_rev(tp) == ASIC_REV_5719)
15005 			tg3_get_5717_nvram_info(tp);
15006 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15007 			 tg3_asic_rev(tp) == ASIC_REV_5762)
15008 			tg3_get_5720_nvram_info(tp);
15009 		else
15010 			tg3_get_nvram_info(tp);
15011 
15012 		if (tp->nvram_size == 0)
15013 			tg3_get_nvram_size(tp);
15014 
15015 		tg3_disable_nvram_access(tp);
15016 		tg3_nvram_unlock(tp);
15017 
15018 	} else {
15019 		tg3_flag_clear(tp, NVRAM);
15020 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15021 
15022 		tg3_get_eeprom_size(tp);
15023 	}
15024 }
15025 
15026 struct subsys_tbl_ent {
15027 	u16 subsys_vendor, subsys_devid;
15028 	u32 phy_id;
15029 };
15030 
15031 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15032 	/* Broadcom boards. */
15033 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15034 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15035 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15036 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15037 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15038 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15039 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15040 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15041 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15042 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15043 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15044 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15045 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15046 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15047 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15048 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15049 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15050 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15051 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15052 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15053 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15054 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15055 
15056 	/* 3com boards. */
15057 	{ TG3PCI_SUBVENDOR_ID_3COM,
15058 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15059 	{ TG3PCI_SUBVENDOR_ID_3COM,
15060 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15061 	{ TG3PCI_SUBVENDOR_ID_3COM,
15062 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15063 	{ TG3PCI_SUBVENDOR_ID_3COM,
15064 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15065 	{ TG3PCI_SUBVENDOR_ID_3COM,
15066 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15067 
15068 	/* DELL boards. */
15069 	{ TG3PCI_SUBVENDOR_ID_DELL,
15070 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15071 	{ TG3PCI_SUBVENDOR_ID_DELL,
15072 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15073 	{ TG3PCI_SUBVENDOR_ID_DELL,
15074 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15075 	{ TG3PCI_SUBVENDOR_ID_DELL,
15076 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15077 
15078 	/* Compaq boards. */
15079 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15080 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15081 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15082 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15083 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15084 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15085 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15086 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15087 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15088 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15089 
15090 	/* IBM boards. */
15091 	{ TG3PCI_SUBVENDOR_ID_IBM,
15092 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15093 };
15094 
15095 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15096 {
15097 	int i;
15098 
15099 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15100 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15101 		     tp->pdev->subsystem_vendor) &&
15102 		    (subsys_id_to_phy_id[i].subsys_devid ==
15103 		     tp->pdev->subsystem_device))
15104 			return &subsys_id_to_phy_id[i];
15105 	}
15106 	return NULL;
15107 }
15108 
15109 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15110 {
15111 	u32 val;
15112 
15113 	tp->phy_id = TG3_PHY_ID_INVALID;
15114 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15115 
15116 	/* Assume an onboard device and WOL capable by default.  */
15117 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15118 	tg3_flag_set(tp, WOL_CAP);
15119 
15120 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15121 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15122 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15123 			tg3_flag_set(tp, IS_NIC);
15124 		}
15125 		val = tr32(VCPU_CFGSHDW);
15126 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15127 			tg3_flag_set(tp, ASPM_WORKAROUND);
15128 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15129 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15130 			tg3_flag_set(tp, WOL_ENABLE);
15131 			device_set_wakeup_enable(&tp->pdev->dev, true);
15132 		}
15133 		goto done;
15134 	}
15135 
15136 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15137 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15138 		u32 nic_cfg, led_cfg;
15139 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15140 		u32 nic_phy_id, ver, eeprom_phy_id;
15141 		int eeprom_phy_serdes = 0;
15142 
15143 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15144 		tp->nic_sram_data_cfg = nic_cfg;
15145 
15146 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15147 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15148 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15149 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15150 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15151 		    (ver > 0) && (ver < 0x100))
15152 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15153 
15154 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15155 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15156 
15157 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15158 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15159 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15160 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15161 
15162 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15163 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15164 			eeprom_phy_serdes = 1;
15165 
15166 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15167 		if (nic_phy_id != 0) {
15168 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15169 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15170 
15171 			eeprom_phy_id  = (id1 >> 16) << 10;
15172 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15173 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15174 		} else
15175 			eeprom_phy_id = 0;
15176 
15177 		tp->phy_id = eeprom_phy_id;
15178 		if (eeprom_phy_serdes) {
15179 			if (!tg3_flag(tp, 5705_PLUS))
15180 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15181 			else
15182 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15183 		}
15184 
15185 		if (tg3_flag(tp, 5750_PLUS))
15186 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15187 				    SHASTA_EXT_LED_MODE_MASK);
15188 		else
15189 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15190 
15191 		switch (led_cfg) {
15192 		default:
15193 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15194 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15195 			break;
15196 
15197 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15198 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15199 			break;
15200 
15201 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15202 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15203 
15204 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
15205 			 * read on some older 5700/5701 bootcode.
15206 			 */
15207 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15208 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15209 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15210 
15211 			break;
15212 
15213 		case SHASTA_EXT_LED_SHARED:
15214 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15215 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15216 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15217 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15218 						 LED_CTRL_MODE_PHY_2);
15219 
15220 			if (tg3_flag(tp, 5717_PLUS) ||
15221 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15222 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15223 						LED_CTRL_BLINK_RATE_MASK;
15224 
15225 			break;
15226 
15227 		case SHASTA_EXT_LED_MAC:
15228 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15229 			break;
15230 
15231 		case SHASTA_EXT_LED_COMBO:
15232 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15233 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15234 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15235 						 LED_CTRL_MODE_PHY_2);
15236 			break;
15237 
15238 		}
15239 
15240 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15241 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15242 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15243 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15244 
15245 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15246 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15247 
15248 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15249 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15250 			if ((tp->pdev->subsystem_vendor ==
15251 			     PCI_VENDOR_ID_ARIMA) &&
15252 			    (tp->pdev->subsystem_device == 0x205a ||
15253 			     tp->pdev->subsystem_device == 0x2063))
15254 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15255 		} else {
15256 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15257 			tg3_flag_set(tp, IS_NIC);
15258 		}
15259 
15260 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15261 			tg3_flag_set(tp, ENABLE_ASF);
15262 			if (tg3_flag(tp, 5750_PLUS))
15263 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15264 		}
15265 
15266 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15267 		    tg3_flag(tp, 5750_PLUS))
15268 			tg3_flag_set(tp, ENABLE_APE);
15269 
15270 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15271 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15272 			tg3_flag_clear(tp, WOL_CAP);
15273 
15274 		if (tg3_flag(tp, WOL_CAP) &&
15275 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15276 			tg3_flag_set(tp, WOL_ENABLE);
15277 			device_set_wakeup_enable(&tp->pdev->dev, true);
15278 		}
15279 
15280 		if (cfg2 & (1 << 17))
15281 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15282 
15283 		/* SerDes signal pre-emphasis in register 0x590 is set by
15284 		 * the bootcode if bit 18 is set. */
15285 		if (cfg2 & (1 << 18))
15286 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15287 
15288 		if ((tg3_flag(tp, 57765_PLUS) ||
15289 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15290 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15291 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15292 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15293 
15294 		if (tg3_flag(tp, PCI_EXPRESS)) {
15295 			u32 cfg3;
15296 
15297 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15298 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15299 			    !tg3_flag(tp, 57765_PLUS) &&
15300 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15301 				tg3_flag_set(tp, ASPM_WORKAROUND);
15302 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15303 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15304 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15305 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15306 		}
15307 
15308 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15309 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15310 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15311 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15312 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15313 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15314 
15315 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15316 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15317 	}
15318 done:
15319 	if (tg3_flag(tp, WOL_CAP))
15320 		device_set_wakeup_enable(&tp->pdev->dev,
15321 					 tg3_flag(tp, WOL_ENABLE));
15322 	else
15323 		device_set_wakeup_capable(&tp->pdev->dev, false);
15324 }
15325 
15326 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15327 {
15328 	int i, err;
15329 	u32 val2, off = offset * 8;
15330 
15331 	err = tg3_nvram_lock(tp);
15332 	if (err)
15333 		return err;
15334 
15335 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15336 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15337 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15338 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15339 	udelay(10);
15340 
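	/* Poll for completion for up to ~1 ms (100 iterations x 10 us). */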
15341 	for (i = 0; i < 100; i++) {
15342 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15343 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15344 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15345 			break;
15346 		}
15347 		udelay(10);
15348 	}
15349 
15350 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15351 
15352 	tg3_nvram_unlock(tp);
15353 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15354 		return 0;
15355 
15356 	return -EBUSY;
15357 }
15358 
15359 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15360 {
15361 	int i;
15362 	u32 val;
15363 
15364 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15365 	tw32(OTP_CTRL, cmd);
15366 
15367 	/* Wait for up to 1 ms for command to execute. */
15368 	for (i = 0; i < 100; i++) {
15369 		val = tr32(OTP_STATUS);
15370 		if (val & OTP_STATUS_CMD_DONE)
15371 			break;
15372 		udelay(10);
15373 	}
15374 
15375 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15376 }
15377 
15378 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15379  * configuration is a 32-bit value that straddles the alignment boundary.
15380  * We do two 32-bit reads and then shift and merge the results.
15381  */
15382 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15383 {
15384 	u32 bhalf_otp, thalf_otp;
15385 
15386 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15387 
15388 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15389 		return 0;
15390 
15391 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15392 
15393 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15394 		return 0;
15395 
15396 	thalf_otp = tr32(OTP_READ_DATA);
15397 
15398 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15399 
15400 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15401 		return 0;
15402 
15403 	bhalf_otp = tr32(OTP_READ_DATA);
15404 
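	/* E.g. (illustrative): thalf_otp = 0x1111BBBB and
	 * bhalf_otp = 0xCCCC2222 merge to 0xBBBBCCCC.
	 */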
15405 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15406 }
15407 
15408 static void tg3_phy_init_link_config(struct tg3 *tp)
15409 {
15410 	u32 adv = ADVERTISED_Autoneg;
15411 
15412 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15413 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15414 			adv |= ADVERTISED_1000baseT_Half;
15415 		adv |= ADVERTISED_1000baseT_Full;
15416 	}
15417 
15418 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15419 		adv |= ADVERTISED_100baseT_Half |
15420 		       ADVERTISED_100baseT_Full |
15421 		       ADVERTISED_10baseT_Half |
15422 		       ADVERTISED_10baseT_Full |
15423 		       ADVERTISED_TP;
15424 	else
15425 		adv |= ADVERTISED_FIBRE;
15426 
15427 	tp->link_config.advertising = adv;
15428 	tp->link_config.speed = SPEED_UNKNOWN;
15429 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15430 	tp->link_config.autoneg = AUTONEG_ENABLE;
15431 	tp->link_config.active_speed = SPEED_UNKNOWN;
15432 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15433 
15434 	tp->old_link = -1;
15435 }
15436 
15437 static int tg3_phy_probe(struct tg3 *tp)
15438 {
15439 	u32 hw_phy_id_1, hw_phy_id_2;
15440 	u32 hw_phy_id, hw_phy_id_masked;
15441 	int err;
15442 
15443 	/* flow control autonegotiation is default behavior */
15444 	tg3_flag_set(tp, PAUSE_AUTONEG);
15445 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15446 
15447 	if (tg3_flag(tp, ENABLE_APE)) {
15448 		switch (tp->pci_fn) {
15449 		case 0:
15450 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15451 			break;
15452 		case 1:
15453 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15454 			break;
15455 		case 2:
15456 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15457 			break;
15458 		case 3:
15459 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15460 			break;
15461 		}
15462 	}
15463 
15464 	if (!tg3_flag(tp, ENABLE_ASF) &&
15465 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15466 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15467 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15468 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15469 
15470 	if (tg3_flag(tp, USE_PHYLIB))
15471 		return tg3_phy_init(tp);
15472 
15473 	/* Reading the PHY ID register can conflict with ASF
15474 	 * firmware access to the PHY hardware.
15475 	 */
15476 	err = 0;
15477 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15478 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15479 	} else {
15480 		/* Now read the physical PHY_ID from the chip and verify
15481 		 * that it is sane.  If it doesn't look good, we fall back
15482 		 * to the value found in the eeprom area, and failing
15483 		 * that, the hard-coded subsys-device table.
15484 		 */
15485 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15486 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15487 
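		/* Pack the two MII ID registers into tg3's internal format:
		 * bits 25:10 come from PHYSID1, bits 31:26 and 9:0 from
		 * PHYSID2; TG3_PHY_ID_MASK below strips the low revision
		 * bits.
		 */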
15488 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15489 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15490 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15491 
15492 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15493 	}
15494 
15495 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15496 		tp->phy_id = hw_phy_id;
15497 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15498 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15499 		else
15500 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15501 	} else {
15502 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15503 			/* Do nothing, phy ID already set up in
15504 			 * tg3_get_eeprom_hw_cfg().
15505 			 */
15506 		} else {
15507 			struct subsys_tbl_ent *p;
15508 
15509 			/* No eeprom signature?  Try the hardcoded
15510 			 * subsys device table.
15511 			 */
15512 			p = tg3_lookup_by_subsys(tp);
15513 			if (p) {
15514 				tp->phy_id = p->phy_id;
15515 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15516 				/* So far we have seen the IDs 0xbc050cd0,
15517 				 * 0xbc050f80 and 0xbc050c30 on devices
15518 				 * connected to a BCM4785, and there are
15519 				 * probably more. For now, just assume the
15520 				 * PHY is supported when it is connected
15521 				 * to an SSB core.
15522 				 */
15523 				return -ENODEV;
15524 			}
15525 
15526 			if (!tp->phy_id ||
15527 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15528 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15529 		}
15530 	}
15531 
15532 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15533 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15534 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15535 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15536 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15537 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15538 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15539 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15540 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15541 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15542 
15543 		tp->eee.supported = SUPPORTED_100baseT_Full |
15544 				    SUPPORTED_1000baseT_Full;
15545 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15546 				     ADVERTISED_1000baseT_Full;
15547 		tp->eee.eee_enabled = 1;
15548 		tp->eee.tx_lpi_enabled = 1;
15549 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15550 	}
15551 
15552 	tg3_phy_init_link_config(tp);
15553 
15554 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15555 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15556 	    !tg3_flag(tp, ENABLE_APE) &&
15557 	    !tg3_flag(tp, ENABLE_ASF)) {
15558 		u32 bmsr, dummy;
15559 
15560 		tg3_readphy(tp, MII_BMSR, &bmsr);
15561 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15562 		    (bmsr & BMSR_LSTATUS))
15563 			goto skip_phy_reset;
15564 
15565 		err = tg3_phy_reset(tp);
15566 		if (err)
15567 			return err;
15568 
15569 		tg3_phy_set_wirespeed(tp);
15570 
15571 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15572 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15573 					    tp->link_config.flowctrl);
15574 
15575 			tg3_writephy(tp, MII_BMCR,
15576 				     BMCR_ANENABLE | BMCR_ANRESTART);
15577 		}
15578 	}
15579 
15580 skip_phy_reset:
15581 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15582 		err = tg3_init_5401phy_dsp(tp);
15583 		if (err)
15584 			return err;
15585 
15586 		err = tg3_init_5401phy_dsp(tp);
15587 	}
15588 
15589 	return err;
15590 }
15591 
15592 static void tg3_read_vpd(struct tg3 *tp)
15593 {
15594 	u8 *vpd_data;
15595 	unsigned int len, vpdlen;
15596 	int i;
15597 
15598 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15599 	if (!vpd_data)
15600 		goto out_no_vpd;
15601 
15602 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15603 					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15604 	if (i < 0)
15605 		goto partno;
15606 
15607 	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15608 		goto partno;
15609 
15610 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15611 					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15612 	if (i < 0)
15613 		goto partno;
15614 
15615 	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15616 	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15617 
15618 partno:
15619 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15620 					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15621 	if (i < 0)
15622 		goto out_not_found;
15623 
15624 	if (len > TG3_BPN_SIZE)
15625 		goto out_not_found;
15626 
15627 	memcpy(tp->board_part_number, &vpd_data[i], len);
15628 
15629 out_not_found:
15630 	kfree(vpd_data);
15631 	if (tp->board_part_number[0])
15632 		return;
15633 
15634 out_no_vpd:
15635 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15636 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15637 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15638 			strcpy(tp->board_part_number, "BCM5717");
15639 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15640 			strcpy(tp->board_part_number, "BCM5718");
15641 		else
15642 			goto nomatch;
15643 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15644 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15645 			strcpy(tp->board_part_number, "BCM57780");
15646 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15647 			strcpy(tp->board_part_number, "BCM57760");
15648 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15649 			strcpy(tp->board_part_number, "BCM57790");
15650 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15651 			strcpy(tp->board_part_number, "BCM57788");
15652 		else
15653 			goto nomatch;
15654 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15655 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15656 			strcpy(tp->board_part_number, "BCM57761");
15657 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15658 			strcpy(tp->board_part_number, "BCM57765");
15659 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15660 			strcpy(tp->board_part_number, "BCM57781");
15661 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15662 			strcpy(tp->board_part_number, "BCM57785");
15663 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15664 			strcpy(tp->board_part_number, "BCM57791");
15665 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15666 			strcpy(tp->board_part_number, "BCM57795");
15667 		else
15668 			goto nomatch;
15669 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15670 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15671 			strcpy(tp->board_part_number, "BCM57762");
15672 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15673 			strcpy(tp->board_part_number, "BCM57766");
15674 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15675 			strcpy(tp->board_part_number, "BCM57782");
15676 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15677 			strcpy(tp->board_part_number, "BCM57786");
15678 		else
15679 			goto nomatch;
15680 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15681 		strcpy(tp->board_part_number, "BCM95906");
15682 	} else {
15683 nomatch:
15684 		strcpy(tp->board_part_number, "none");
15685 	}
15686 }
15687 
15688 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15689 {
15690 	u32 val;
15691 
15692 	if (tg3_nvram_read(tp, offset, &val) ||
15693 	    (val & 0xfc000000) != 0x0c000000 ||
15694 	    tg3_nvram_read(tp, offset + 4, &val) ||
15695 	    val != 0)
15696 		return 0;
15697 
15698 	return 1;
15699 }
15700 
15701 static void tg3_read_bc_ver(struct tg3 *tp)
15702 {
15703 	u32 val, offset, start, ver_offset;
15704 	int i, dst_off;
15705 	bool newver = false;
15706 
15707 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15708 	    tg3_nvram_read(tp, 0x4, &start))
15709 		return;
15710 
15711 	offset = tg3_nvram_logical_addr(tp, offset);
15712 
15713 	if (tg3_nvram_read(tp, offset, &val))
15714 		return;
15715 
15716 	if ((val & 0xfc000000) == 0x0c000000) {
15717 		if (tg3_nvram_read(tp, offset + 4, &val))
15718 			return;
15719 
15720 		if (val == 0)
15721 			newver = true;
15722 	}
15723 
15724 	dst_off = strlen(tp->fw_ver);
15725 
15726 	if (newver) {
15727 		if (TG3_VER_SIZE - dst_off < 16 ||
15728 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15729 			return;
15730 
15731 		offset = offset + ver_offset - start;
15732 		for (i = 0; i < 16; i += 4) {
15733 			__be32 v;
15734 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15735 				return;
15736 
15737 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15738 		}
15739 	} else {
15740 		u32 major, minor;
15741 
15742 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15743 			return;
15744 
15745 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15746 			TG3_NVM_BCVER_MAJSFT;
15747 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15748 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15749 			 "v%d.%02d", major, minor);
15750 	}
15751 }
15752 
15753 static void tg3_read_hwsb_ver(struct tg3 *tp)
15754 {
15755 	u32 val, major, minor;
15756 
15757 	/* Use native endian representation */
15758 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15759 		return;
15760 
15761 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15762 		TG3_NVM_HWSB_CFG1_MAJSFT;
15763 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15764 		TG3_NVM_HWSB_CFG1_MINSFT;
15765 
15766 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15767 }
15768 
15769 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15770 {
15771 	u32 offset, major, minor, build;
15772 
15773 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15774 
15775 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15776 		return;
15777 
15778 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15779 	case TG3_EEPROM_SB_REVISION_0:
15780 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15781 		break;
15782 	case TG3_EEPROM_SB_REVISION_2:
15783 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15784 		break;
15785 	case TG3_EEPROM_SB_REVISION_3:
15786 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15787 		break;
15788 	case TG3_EEPROM_SB_REVISION_4:
15789 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15790 		break;
15791 	case TG3_EEPROM_SB_REVISION_5:
15792 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15793 		break;
15794 	case TG3_EEPROM_SB_REVISION_6:
15795 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15796 		break;
15797 	default:
15798 		return;
15799 	}
15800 
15801 	if (tg3_nvram_read(tp, offset, &val))
15802 		return;
15803 
15804 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15805 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15806 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15807 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15808 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15809 
15810 	if (minor > 99 || build > 26)
15811 		return;
15812 
15813 	offset = strlen(tp->fw_ver);
15814 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15815 		 " v%d.%02d", major, minor);
15816 
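	/* A non-zero build number becomes a trailing letter: build 1 -> 'a',
	 * ..., build 26 -> 'z' (larger values were rejected above).
	 */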
15817 	if (build > 0) {
15818 		offset = strlen(tp->fw_ver);
15819 		if (offset < TG3_VER_SIZE - 1)
15820 			tp->fw_ver[offset] = 'a' + build - 1;
15821 	}
15822 }
15823 
15824 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15825 {
15826 	u32 val, offset, start;
15827 	int i, vlen;
15828 
15829 	for (offset = TG3_NVM_DIR_START;
15830 	     offset < TG3_NVM_DIR_END;
15831 	     offset += TG3_NVM_DIRENT_SIZE) {
15832 		if (tg3_nvram_read(tp, offset, &val))
15833 			return;
15834 
15835 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15836 			break;
15837 	}
15838 
15839 	if (offset == TG3_NVM_DIR_END)
15840 		return;
15841 
15842 	if (!tg3_flag(tp, 5705_PLUS))
15843 		start = 0x08000000;
15844 	else if (tg3_nvram_read(tp, offset - 4, &start))
15845 		return;
15846 
15847 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15848 	    !tg3_fw_img_is_valid(tp, offset) ||
15849 	    tg3_nvram_read(tp, offset + 8, &val))
15850 		return;
15851 
15852 	offset += val - start;
15853 
15854 	vlen = strlen(tp->fw_ver);
15855 
15856 	tp->fw_ver[vlen++] = ',';
15857 	tp->fw_ver[vlen++] = ' ';
15858 
15859 	for (i = 0; i < 4; i++) {
15860 		__be32 v;
15861 		if (tg3_nvram_read_be32(tp, offset, &v))
15862 			return;
15863 
15864 		offset += sizeof(v);
15865 
15866 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15867 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15868 			break;
15869 		}
15870 
15871 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15872 		vlen += sizeof(v);
15873 	}
15874 }
15875 
15876 static void tg3_probe_ncsi(struct tg3 *tp)
15877 {
15878 	u32 apedata;
15879 
15880 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15881 	if (apedata != APE_SEG_SIG_MAGIC)
15882 		return;
15883 
15884 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15885 	if (!(apedata & APE_FW_STATUS_READY))
15886 		return;
15887 
15888 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15889 		tg3_flag_set(tp, APE_HAS_NCSI);
15890 }
15891 
15892 static void tg3_read_dash_ver(struct tg3 *tp)
15893 {
15894 	int vlen;
15895 	u32 apedata;
15896 	char *fwtype;
15897 
15898 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15899 
15900 	if (tg3_flag(tp, APE_HAS_NCSI))
15901 		fwtype = "NCSI";
15902 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15903 		fwtype = "SMASH";
15904 	else
15905 		fwtype = "DASH";
15906 
15907 	vlen = strlen(tp->fw_ver);
15908 
15909 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15910 		 fwtype,
15911 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15912 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15913 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15914 		 (apedata & APE_FW_VERSION_BLDMSK));
15915 }
15916 
15917 static void tg3_read_otp_ver(struct tg3 *tp)
15918 {
15919 	u32 val, val2;
15920 
15921 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15922 		return;
15923 
15924 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15925 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15926 	    TG3_OTP_MAGIC0_VALID(val)) {
15927 		u64 val64 = (u64) val << 32 | val2;
15928 		u32 ver = 0;
15929 		int i, vlen;
15930 
15931 		for (i = 0; i < 7; i++) {
15932 			if ((val64 & 0xff) == 0)
15933 				break;
15934 			ver = val64 & 0xff;
15935 			val64 >>= 8;
15936 		}
15937 		vlen = strlen(tp->fw_ver);
15938 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15939 	}
15940 }
15941 
15942 static void tg3_read_fw_ver(struct tg3 *tp)
15943 {
15944 	u32 val;
15945 	bool vpd_vers = false;
15946 
15947 	if (tp->fw_ver[0] != 0)
15948 		vpd_vers = true;
15949 
15950 	if (tg3_flag(tp, NO_NVRAM)) {
15951 		strcat(tp->fw_ver, "sb");
15952 		tg3_read_otp_ver(tp);
15953 		return;
15954 	}
15955 
15956 	if (tg3_nvram_read(tp, 0, &val))
15957 		return;
15958 
15959 	if (val == TG3_EEPROM_MAGIC)
15960 		tg3_read_bc_ver(tp);
15961 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15962 		tg3_read_sb_ver(tp, val);
15963 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15964 		tg3_read_hwsb_ver(tp);
15965 
15966 	if (tg3_flag(tp, ENABLE_ASF)) {
15967 		if (tg3_flag(tp, ENABLE_APE)) {
15968 			tg3_probe_ncsi(tp);
15969 			if (!vpd_vers)
15970 				tg3_read_dash_ver(tp);
15971 		} else if (!vpd_vers) {
15972 			tg3_read_mgmtfw_ver(tp);
15973 		}
15974 	}
15975 
15976 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15977 }
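
/* Illustrative note, not from the original source: the helpers above build
 * tp->fw_ver by appending pieces, so typical results look like the
 * following (values hypothetical):
 *
 *	"5761-v3.29"			bootcode version only
 *	"sb v2.05"			selfboot NVRAM image
 *	"5719-v1.24 NCSI v1.2.45.0"	bootcode plus APE NCSI firmware
 *
 * The buffer is TG3_VER_SIZE bytes and is NUL-terminated above no matter
 * which branches ran.
 */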
15978 
15979 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15980 {
15981 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
15982 		return TG3_RX_RET_MAX_SIZE_5717;
15983 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15984 		return TG3_RX_RET_MAX_SIZE_5700;
15985 	else
15986 		return TG3_RX_RET_MAX_SIZE_5705;
15987 }
15988 
15989 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15990 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15991 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15992 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15993 	{ },
15994 };
15995 
15996 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15997 {
15998 	struct pci_dev *peer;
15999 	unsigned int func, devnr = tp->pdev->devfn & ~7;
16000 
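	/* PCI devfn packs the device number in bits 7:3 and the function
	 * number in bits 2:0, so devfn & ~7 is function 0 of this slot and
	 * the loop below probes all eight sibling functions.
	 */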
16001 	for (func = 0; func < 8; func++) {
16002 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
16003 		if (peer && peer != tp->pdev)
16004 			break;
16005 		pci_dev_put(peer);
16006 	}
16007 	/* 5704 can be configured in single-port mode, set peer to
16008 	 * tp->pdev in that case.
16009 	 */
16010 	if (!peer) {
16011 		peer = tp->pdev;
16012 		return peer;
16013 	}
16014 
16015 	/*
16016 	 * We don't need to keep the refcount elevated; there's no way
16017 	 * to remove one half of this device without removing the other.
16018 	 */
16019 	pci_dev_put(peer);
16020 
16021 	return peer;
16022 }
16023 
16024 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16025 {
16026 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16027 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16028 		u32 reg;
16029 
16030 		/* All devices that use the alternate
16031 		 * ASIC REV location have a CPMU.
16032 		 */
16033 		tg3_flag_set(tp, CPMU_PRESENT);
16034 
16035 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16036 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16037 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16038 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16039 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16040 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16041 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16042 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16043 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16044 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16045 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16046 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16047 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16048 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16049 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16050 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16051 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16052 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16053 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16054 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16055 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16056 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16057 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16058 		else
16059 			reg = TG3PCI_PRODID_ASICREV;
16060 
16061 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16062 	}
16063 
16064 	/* Wrong chip ID in 5752 A0. This code can be removed later
16065 	 * as A0 is not in production.
16066 	 */
16067 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16068 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16069 
16070 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16071 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16072 
16073 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16074 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16075 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16076 		tg3_flag_set(tp, 5717_PLUS);
16077 
16078 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16079 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16080 		tg3_flag_set(tp, 57765_CLASS);
16081 
16082 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16083 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16084 		tg3_flag_set(tp, 57765_PLUS);
16085 
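	/* These family flags are cumulative: 57765_PLUS implies 5755_PLUS,
	 * which implies 5750_PLUS, which implies 5705_PLUS, so each check
	 * below folds the newer families into the older feature classes.
	 */
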
16086 	/* Intentionally exclude ASIC_REV_5906 */
16087 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16088 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16089 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16090 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16091 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16092 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16093 	    tg3_flag(tp, 57765_PLUS))
16094 		tg3_flag_set(tp, 5755_PLUS);
16095 
16096 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16097 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16098 		tg3_flag_set(tp, 5780_CLASS);
16099 
16100 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16101 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16102 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16103 	    tg3_flag(tp, 5755_PLUS) ||
16104 	    tg3_flag(tp, 5780_CLASS))
16105 		tg3_flag_set(tp, 5750_PLUS);
16106 
16107 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16108 	    tg3_flag(tp, 5750_PLUS))
16109 		tg3_flag_set(tp, 5705_PLUS);
16110 }
16111 
16112 static bool tg3_10_100_only_device(struct tg3 *tp,
16113 				   const struct pci_device_id *ent)
16114 {
16115 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16116 
16117 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16118 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16119 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16120 		return true;
16121 
16122 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16123 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16124 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16125 				return true;
16126 		} else {
16127 			return true;
16128 		}
16129 	}
16130 
16131 	return false;
16132 }
16133 
16134 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16135 {
16136 	u32 misc_ctrl_reg;
16137 	u32 pci_state_reg, grc_misc_cfg;
16138 	u32 val;
16139 	u16 pci_cmd;
16140 	int err;
16141 
16142 	/* Force memory write invalidate off.  If we leave it on,
16143 	 * then on 5700_BX chips we have to enable a workaround.
16144 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16145 	 * to match the cacheline size.  The Broadcom driver has this
16146 	 * workaround but turns MWI off all the time, so it never uses
16147 	 * it.  This suggests that the workaround is insufficient.
16148 	 */
16149 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16150 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16151 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16152 
16153 	/* Important! -- Make sure register accesses are byteswapped
16154 	 * correctly.  Also, for those chips that require it, make
16155 	 * sure that indirect register accesses are enabled before
16156 	 * the first operation.
16157 	 */
16158 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16159 			      &misc_ctrl_reg);
16160 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16161 			       MISC_HOST_CTRL_CHIPREV);
16162 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16163 			       tp->misc_host_ctrl);
16164 
16165 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16166 
16167 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16168 	 * we need to disable memory and use config. cycles
16169 	 * only to access all registers. The 5702/03 chips
16170 	 * can mistakenly decode the special cycles from the
16171 	 * ICH chipsets as memory write cycles, causing corruption
16172 	 * of register and memory space. Only certain ICH bridges
16173 	 * will drive special cycles with non-zero data during the
16174 	 * address phase which can fall within the 5703's address
16175 	 * range. This is not an ICH bug as the PCI spec allows
16176 	 * non-zero address during special cycles. However, only
16177 	 * these ICH bridges are known to drive non-zero addresses
16178 	 * during special cycles.
16179 	 *
16180 	 * Since special cycles do not cross PCI bridges, we only
16181 	 * enable this workaround if the 5703 is on the secondary
16182 	 * bus of these ICH bridges.
16183 	 */
16184 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16185 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16186 		static struct tg3_dev_id {
16187 			u32	vendor;
16188 			u32	device;
16189 			u32	rev;
16190 		} ich_chipsets[] = {
16191 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16192 			  PCI_ANY_ID },
16193 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16194 			  PCI_ANY_ID },
16195 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16196 			  0xa },
16197 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16198 			  PCI_ANY_ID },
16199 			{ },
16200 		};
16201 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16202 		struct pci_dev *bridge = NULL;
16203 
16204 		while (pci_id->vendor != 0) {
16205 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16206 						bridge);
16207 			if (!bridge) {
16208 				pci_id++;
16209 				continue;
16210 			}
16211 			if (pci_id->rev != PCI_ANY_ID) {
16212 				if (bridge->revision > pci_id->rev)
16213 					continue;
16214 			}
16215 			if (bridge->subordinate &&
16216 			    (bridge->subordinate->number ==
16217 			     tp->pdev->bus->number)) {
16218 				tg3_flag_set(tp, ICH_WORKAROUND);
16219 				pci_dev_put(bridge);
16220 				break;
16221 			}
16222 		}
16223 	}
16224 
16225 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16226 		static struct tg3_dev_id {
16227 			u32	vendor;
16228 			u32	device;
16229 		} bridge_chipsets[] = {
16230 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16231 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16232 			{ },
16233 		};
16234 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16235 		struct pci_dev *bridge = NULL;
16236 
16237 		while (pci_id->vendor != 0) {
16238 			bridge = pci_get_device(pci_id->vendor,
16239 						pci_id->device,
16240 						bridge);
16241 			if (!bridge) {
16242 				pci_id++;
16243 				continue;
16244 			}
16245 			if (bridge->subordinate &&
16246 			    (bridge->subordinate->number <=
16247 			     tp->pdev->bus->number) &&
16248 			    (bridge->subordinate->busn_res.end >=
16249 			     tp->pdev->bus->number)) {
16250 				tg3_flag_set(tp, 5701_DMA_BUG);
16251 				pci_dev_put(bridge);
16252 				break;
16253 			}
16254 		}
16255 	}
16256 
16257 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
16258 	 * DMA addresses > 40-bit.  This bridge may have additional
16259 	 * 57xx devices behind it in some 4-port NIC designs, for example.
16260 	 * Any tg3 device found behind the bridge will also need the 40-bit
16261 	 * DMA workaround.
16262 	 */
16263 	if (tg3_flag(tp, 5780_CLASS)) {
16264 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16265 		tp->msi_cap = tp->pdev->msi_cap;
16266 	} else {
16267 		struct pci_dev *bridge = NULL;
16268 
16269 		do {
16270 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16271 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16272 						bridge);
16273 			if (bridge && bridge->subordinate &&
16274 			    (bridge->subordinate->number <=
16275 			     tp->pdev->bus->number) &&
16276 			    (bridge->subordinate->busn_res.end >=
16277 			     tp->pdev->bus->number)) {
16278 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16279 				pci_dev_put(bridge);
16280 				break;
16281 			}
16282 		} while (bridge);
16283 	}
16284 
16285 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16286 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16287 		tp->pdev_peer = tg3_find_peer(tp);
16288 
16289 	/* Determine TSO capabilities */
16290 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16291 		; /* Do nothing. HW bug. */
16292 	else if (tg3_flag(tp, 57765_PLUS))
16293 		tg3_flag_set(tp, HW_TSO_3);
16294 	else if (tg3_flag(tp, 5755_PLUS) ||
16295 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16296 		tg3_flag_set(tp, HW_TSO_2);
16297 	else if (tg3_flag(tp, 5750_PLUS)) {
16298 		tg3_flag_set(tp, HW_TSO_1);
16299 		tg3_flag_set(tp, TSO_BUG);
16300 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16301 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16302 			tg3_flag_clear(tp, TSO_BUG);
16303 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16304 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16305 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16306 		tg3_flag_set(tp, FW_TSO);
16307 		tg3_flag_set(tp, TSO_BUG);
16308 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16309 			tp->fw_needed = FIRMWARE_TG3TSO5;
16310 		else
16311 			tp->fw_needed = FIRMWARE_TG3TSO;
16312 	}
16313 
16314 	/* Selectively allow TSO based on operating conditions */
16315 	if (tg3_flag(tp, HW_TSO_1) ||
16316 	    tg3_flag(tp, HW_TSO_2) ||
16317 	    tg3_flag(tp, HW_TSO_3) ||
16318 	    tg3_flag(tp, FW_TSO)) {
16319 		/* For firmware TSO, assume ASF is disabled.
16320 		 * We'll disable TSO later if we discover ASF
16321 		 * is enabled in tg3_get_eeprom_hw_cfg().
16322 		 */
16323 		tg3_flag_set(tp, TSO_CAPABLE);
16324 	} else {
16325 		tg3_flag_clear(tp, TSO_CAPABLE);
16326 		tg3_flag_clear(tp, TSO_BUG);
16327 		tp->fw_needed = NULL;
16328 	}
16329 
16330 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16331 		tp->fw_needed = FIRMWARE_TG3;
16332 
16333 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16334 		tp->fw_needed = FIRMWARE_TG357766;
16335 
16336 	tp->irq_max = 1;
16337 
16338 	if (tg3_flag(tp, 5750_PLUS)) {
16339 		tg3_flag_set(tp, SUPPORT_MSI);
16340 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16341 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16342 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16343 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16344 		     tp->pdev_peer == tp->pdev))
16345 			tg3_flag_clear(tp, SUPPORT_MSI);
16346 
16347 		if (tg3_flag(tp, 5755_PLUS) ||
16348 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16349 			tg3_flag_set(tp, 1SHOT_MSI);
16350 		}
16351 
16352 		if (tg3_flag(tp, 57765_PLUS)) {
16353 			tg3_flag_set(tp, SUPPORT_MSIX);
16354 			tp->irq_max = TG3_IRQ_MAX_VECS;
16355 		}
16356 	}
16357 
16358 	tp->txq_max = 1;
16359 	tp->rxq_max = 1;
16360 	if (tp->irq_max > 1) {
16361 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16362 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16363 
16364 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16365 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16366 			tp->txq_max = tp->irq_max - 1;
16367 	}
16368 
16369 	if (tg3_flag(tp, 5755_PLUS) ||
16370 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16371 		tg3_flag_set(tp, SHORT_DMA_BUG);
16372 
16373 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16374 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16375 
16376 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16377 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16378 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16379 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16380 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16381 
16382 	if (tg3_flag(tp, 57765_PLUS) &&
16383 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16384 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16385 
16386 	if (!tg3_flag(tp, 5705_PLUS) ||
16387 	    tg3_flag(tp, 5780_CLASS) ||
16388 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16389 		tg3_flag_set(tp, JUMBO_CAPABLE);
16390 
16391 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16392 			      &pci_state_reg);
16393 
16394 	if (pci_is_pcie(tp->pdev)) {
16395 		u16 lnkctl;
16396 
16397 		tg3_flag_set(tp, PCI_EXPRESS);
16398 
16399 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16400 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16401 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16402 				tg3_flag_clear(tp, HW_TSO_2);
16403 				tg3_flag_clear(tp, TSO_CAPABLE);
16404 			}
16405 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16406 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16407 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16408 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16409 				tg3_flag_set(tp, CLKREQ_BUG);
16410 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16411 			tg3_flag_set(tp, L1PLLPD_EN);
16412 		}
16413 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16414 		/* BCM5785 devices are effectively PCIe devices, and should
16415 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16416 		 * section.
16417 		 */
16418 		tg3_flag_set(tp, PCI_EXPRESS);
16419 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16420 		   tg3_flag(tp, 5780_CLASS)) {
16421 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16422 		if (!tp->pcix_cap) {
16423 			dev_err(&tp->pdev->dev,
16424 				"Cannot find PCI-X capability, aborting\n");
16425 			return -EIO;
16426 		}
16427 
16428 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16429 			tg3_flag_set(tp, PCIX_MODE);
16430 	}
16431 
16432 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16433 	 * reordering to the mailbox registers done by the host
16434 	 * controller can cause major troubles.  We read back from
16435 	 * every mailbox register write to force the writes to be
16436 	 * posted to the chip in order.
16437 	 */
16438 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16439 	    !tg3_flag(tp, PCI_EXPRESS))
16440 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16441 
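	/* For reference, a flushed mailbox write of the kind selected here
	 * simply reads the register straight back so the posted write
	 * cannot be held or reordered by the host bridge.  Roughly (sketch
	 * only; tg3_write_flush_reg32() earlier in this file is the real
	 * implementation):
	 *
	 *	writel(val, tp->regs + off);
	 *	readl(tp->regs + off);		// flush the posted write
	 */
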
16442 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16443 			     &tp->pci_cacheline_sz);
16444 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16445 			     &tp->pci_lat_timer);
16446 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16447 	    tp->pci_lat_timer < 64) {
16448 		tp->pci_lat_timer = 64;
16449 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16450 				      tp->pci_lat_timer);
16451 	}
16452 
16453 	/* Important! -- It is critical that the PCI-X hw workaround
16454 	 * situation is decided before the first MMIO register access.
16455 	 */
16456 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16457 		/* 5700 BX chips need to have their TX producer index
16458 		 * mailboxes written twice to work around a bug.
16459 		 */
16460 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16461 
16462 		/* If we are in PCI-X mode, enable register write workaround.
16463 		 *
16464 		 * The workaround is to use indirect register accesses
16465 		 * for all chip writes not to mailbox registers.
16466 		 */
16467 		if (tg3_flag(tp, PCIX_MODE)) {
16468 			u32 pm_reg;
16469 
16470 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16471 
16472 			/* The chip can have its power management PCI config
16473 			 * space registers clobbered due to this bug.
16474 			 * So explicitly force the chip into D0 here.
16475 			 */
16476 			pci_read_config_dword(tp->pdev,
16477 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16478 					      &pm_reg);
16479 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16480 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16481 			pci_write_config_dword(tp->pdev,
16482 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16483 					       pm_reg);
16484 
16485 			/* Also, force SERR#/PERR# in PCI command. */
16486 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16487 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16488 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16489 		}
16490 	}
16491 
16492 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16493 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16494 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16495 		tg3_flag_set(tp, PCI_32BIT);
16496 
16497 	/* Chip-specific fixup from Broadcom driver */
16498 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16499 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16500 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16501 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16502 	}
16503 
16504 	/* Default fast path register access methods */
16505 	tp->read32 = tg3_read32;
16506 	tp->write32 = tg3_write32;
16507 	tp->read32_mbox = tg3_read32;
16508 	tp->write32_mbox = tg3_write32;
16509 	tp->write32_tx_mbox = tg3_write32;
16510 	tp->write32_rx_mbox = tg3_write32;
16511 
16512 	/* Various workaround register access methods */
16513 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16514 		tp->write32 = tg3_write_indirect_reg32;
16515 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16516 		 (tg3_flag(tp, PCI_EXPRESS) &&
16517 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16518 		/*
16519 		 * Back to back register writes can cause problems on these
16520 		 * chips, the workaround is to read back all reg writes
16521 		 * except those to mailbox regs.
16522 		 *
16523 		 * See tg3_write_indirect_reg32().
16524 		 */
16525 		tp->write32 = tg3_write_flush_reg32;
16526 	}
16527 
16528 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16529 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16530 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16531 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16532 	}
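
	/* Sketch of the workaround behavior selected above (the driver's
	 * real tg3_write32_tx_mbox() is defined earlier in this file; this
	 * outline is illustrative only):
	 *
	 *	writel(val, mbox);
	 *	if (tg3_flag(tp, TXD_MBOX_HWBUG))
	 *		writel(val, mbox);	// 5700 BX: write twice
	 *	if (tg3_flag(tp, MBOX_WRITE_REORDER))
	 *		readl(mbox);		// force in-order posting
	 */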
16533 
16534 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16535 		tp->read32 = tg3_read_indirect_reg32;
16536 		tp->write32 = tg3_write_indirect_reg32;
16537 		tp->read32_mbox = tg3_read_indirect_mbox;
16538 		tp->write32_mbox = tg3_write_indirect_mbox;
16539 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16540 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16541 
16542 		iounmap(tp->regs);
16543 		tp->regs = NULL;
16544 
16545 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16546 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16547 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16548 	}
16549 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16550 		tp->read32_mbox = tg3_read32_mbox_5906;
16551 		tp->write32_mbox = tg3_write32_mbox_5906;
16552 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16553 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16554 	}
16555 
16556 	if (tp->write32 == tg3_write_indirect_reg32 ||
16557 	    (tg3_flag(tp, PCIX_MODE) &&
16558 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16559 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16560 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16561 
16562 	/* The memory arbiter has to be enabled in order for SRAM accesses
16563 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16564 	 * sure it is enabled, but other entities such as system netboot
16565 	 * code might disable it.
16566 	 */
16567 	val = tr32(MEMARB_MODE);
16568 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16569 
16570 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16571 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16572 	    tg3_flag(tp, 5780_CLASS)) {
16573 		if (tg3_flag(tp, PCIX_MODE)) {
16574 			pci_read_config_dword(tp->pdev,
16575 					      tp->pcix_cap + PCI_X_STATUS,
16576 					      &val);
16577 			tp->pci_fn = val & 0x7;
16578 		}
16579 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16580 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16581 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16582 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16583 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16584 			val = tr32(TG3_CPMU_STATUS);
16585 
16586 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16587 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16588 		else
16589 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16590 				     TG3_CPMU_STATUS_FSHFT_5719;
16591 	}
16592 
16593 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16594 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16595 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16596 	}
16597 
16598 	/* Get eeprom hw config before calling tg3_set_power_state().
16599 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16600 	 * determined before calling tg3_set_power_state() so that
16601 	 * we know whether or not to switch out of Vaux power.
16602 	 * When the flag is set, it means that GPIO1 is used for eeprom
16603 	 * write protect and also implies that it is a LOM where GPIOs
16604 	 * are not used to switch power.
16605 	 */
16606 	tg3_get_eeprom_hw_cfg(tp);
16607 
16608 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16609 		tg3_flag_clear(tp, TSO_CAPABLE);
16610 		tg3_flag_clear(tp, TSO_BUG);
16611 		tp->fw_needed = NULL;
16612 	}
16613 
16614 	if (tg3_flag(tp, ENABLE_APE)) {
16615 		/* Allow reads and writes to the
16616 		 * APE register and memory space.
16617 		 */
16618 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16619 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16620 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16621 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16622 				       pci_state_reg);
16623 
16624 		tg3_ape_lock_init(tp);
16625 		tp->ape_hb_interval =
16626 			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16627 	}
16628 
16629 	/* Set up tp->grc_local_ctrl before calling
16630 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16631 	 * will bring 5700's external PHY out of reset.
16632 	 * It is also used as eeprom write protect on LOMs.
16633 	 */
16634 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16635 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16636 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16637 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16638 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16639 	/* Unused GPIO3 must be driven as output on 5752 because there
16640 	 * are no pull-up resistors on unused GPIO pins.
16641 	 */
16642 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16643 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16644 
16645 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16646 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16647 	    tg3_flag(tp, 57765_CLASS))
16648 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16649 
16650 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16651 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16652 		/* Turn off the debug UART. */
16653 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16654 		if (tg3_flag(tp, IS_NIC))
16655 			/* Keep VMain power. */
16656 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16657 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16658 	}
16659 
16660 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16661 		tp->grc_local_ctrl |=
16662 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16663 
16664 	/* Switch out of Vaux if it is a NIC */
16665 	tg3_pwrsrc_switch_to_vmain(tp);
16666 
16667 	/* Derive initial jumbo mode from MTU assigned in
16668 	 * ether_setup() via the alloc_etherdev() call
16669 	 */
16670 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16671 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16672 
16673 	/* Determine WakeOnLan speed to use. */
16674 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16675 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16676 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16677 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16678 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16679 	} else {
16680 		tg3_flag_set(tp, WOL_SPEED_100MB);
16681 	}
16682 
16683 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16684 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16685 
16686 	/* A few boards don't want Ethernet@WireSpeed phy feature */
16687 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16688 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16689 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16690 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16691 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16692 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16693 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16694 
16695 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16696 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16697 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16698 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16699 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16700 
16701 	if (tg3_flag(tp, 5705_PLUS) &&
16702 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16703 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16704 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16705 	    !tg3_flag(tp, 57765_PLUS)) {
16706 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16707 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16708 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16709 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16710 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16711 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16712 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16713 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16714 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16715 		} else
16716 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16717 	}
16718 
16719 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16720 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16721 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16722 		if (tp->phy_otp == 0)
16723 			tp->phy_otp = TG3_OTP_DEFAULT;
16724 	}
16725 
16726 	if (tg3_flag(tp, CPMU_PRESENT))
16727 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16728 	else
16729 		tp->mi_mode = MAC_MI_MODE_BASE;
16730 
16731 	tp->coalesce_mode = 0;
16732 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16733 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16734 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16735 
16736 	/* Set these bits to enable statistics workaround. */
16737 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16738 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16739 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16740 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16741 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16742 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16743 	}
16744 
16745 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16746 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16747 		tg3_flag_set(tp, USE_PHYLIB);
16748 
16749 	err = tg3_mdio_init(tp);
16750 	if (err)
16751 		return err;
16752 
16753 	/* Initialize data/descriptor byte/word swapping. */
16754 	val = tr32(GRC_MODE);
16755 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16756 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16757 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16758 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16759 			GRC_MODE_B2HRX_ENABLE |
16760 			GRC_MODE_HTX2B_ENABLE |
16761 			GRC_MODE_HOST_STACKUP);
16762 	else
16763 		val &= GRC_MODE_HOST_STACKUP;
16764 
16765 	tw32(GRC_MODE, val | tp->grc_mode);
16766 
16767 	tg3_switch_clocks(tp);
16768 
16769 	/* Clear this out for sanity. */
16770 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16771 
16772 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16773 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16774 
16775 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16776 			      &pci_state_reg);
16777 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16778 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16779 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16780 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16781 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16782 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16783 			void __iomem *sram_base;
16784 
16785 			/* Write some dummy words into the SRAM status block
16786 			 * area and see if they read back correctly.  If the
16787 			 * readback is bad, force enable the PCIX workaround.
16788 			 */
16789 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16790 
16791 			writel(0x00000000, sram_base);
16792 			writel(0x00000000, sram_base + 4);
16793 			writel(0xffffffff, sram_base + 4);
16794 			if (readl(sram_base) != 0x00000000)
16795 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16796 		}
16797 	}
16798 
16799 	udelay(50);
16800 	tg3_nvram_init(tp);
16801 
16802 	/* If the device has an NVRAM, no need to load patch firmware */
16803 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16804 	    !tg3_flag(tp, NO_NVRAM))
16805 		tp->fw_needed = NULL;
16806 
16807 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16808 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16809 
16810 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16811 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16812 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16813 		tg3_flag_set(tp, IS_5788);
16814 
16815 	if (!tg3_flag(tp, IS_5788) &&
16816 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16817 		tg3_flag_set(tp, TAGGED_STATUS);
16818 	if (tg3_flag(tp, TAGGED_STATUS)) {
16819 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16820 				      HOSTCC_MODE_CLRTICK_TXBD);
16821 
16822 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16823 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16824 				       tp->misc_host_ctrl);
16825 	}
16826 
16827 	/* Preserve the APE MAC_MODE bits */
16828 	if (tg3_flag(tp, ENABLE_APE))
16829 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16830 	else
16831 		tp->mac_mode = 0;
16832 
16833 	if (tg3_10_100_only_device(tp, ent))
16834 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16835 
16836 	err = tg3_phy_probe(tp);
16837 	if (err) {
16838 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16839 		/* ... but do not return immediately ... */
16840 		tg3_mdio_fini(tp);
16841 	}
16842 
16843 	tg3_read_vpd(tp);
16844 	tg3_read_fw_ver(tp);
16845 
16846 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16847 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16848 	} else {
16849 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16850 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16851 		else
16852 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16853 	}
16854 
16855 	/* 5700 {AX,BX} chips have a broken status block link
16856 	 * change bit implementation, so we must use the
16857 	 * status register in those cases.
16858 	 */
16859 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16860 		tg3_flag_set(tp, USE_LINKCHG_REG);
16861 	else
16862 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16863 
16864 	/* The led_ctrl is set during tg3_phy_probe; here we might
16865 	 * have to force the link status polling mechanism based
16866 	 * upon subsystem IDs.
16867 	 */
16868 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16869 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16870 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16871 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16872 		tg3_flag_set(tp, USE_LINKCHG_REG);
16873 	}
16874 
16875 	/* For all SERDES we poll the MAC status register. */
16876 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16877 		tg3_flag_set(tp, POLL_SERDES);
16878 	else
16879 		tg3_flag_clear(tp, POLL_SERDES);
16880 
16881 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16882 		tg3_flag_set(tp, POLL_CPMU_LINK);
16883 
16884 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16885 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16886 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16887 	    tg3_flag(tp, PCIX_MODE)) {
16888 		tp->rx_offset = NET_SKB_PAD;
16889 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16890 		tp->rx_copy_thresh = ~(u16)0;
16891 #endif
16892 	}
16893 
16894 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16895 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16896 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16897 
16898 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16899 
16900 	/* Increment the rx prod index on the rx std ring by at most
16901 	 * 8 for these chips to work around hw errata.
16902 	 */
16903 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16904 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16905 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16906 		tp->rx_std_max_post = 8;
16907 
16908 	if (tg3_flag(tp, ASPM_WORKAROUND))
16909 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16910 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16911 
16912 	return err;
16913 }
16914 
16915 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
16916 {
16917 	u32 hi, lo, mac_offset;
16918 	int addr_ok = 0;
16919 	int err;
16920 
16921 	if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
16922 		return 0;
16923 
16924 	if (tg3_flag(tp, IS_SSB_CORE)) {
16925 		err = ssb_gige_get_macaddr(tp->pdev, addr);
16926 		if (!err && is_valid_ether_addr(addr))
16927 			return 0;
16928 	}
16929 
16930 	mac_offset = 0x7c;
16931 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16932 	    tg3_flag(tp, 5780_CLASS)) {
16933 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16934 			mac_offset = 0xcc;
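		/* tg3_nvram_lock() returns nonzero on failure, in which
		 * case the NVRAM command engine is reset to recover; on
		 * success the lock is dropped again immediately.
		 */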
16935 		if (tg3_nvram_lock(tp))
16936 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16937 		else
16938 			tg3_nvram_unlock(tp);
16939 	} else if (tg3_flag(tp, 5717_PLUS)) {
16940 		if (tp->pci_fn & 1)
16941 			mac_offset = 0xcc;
16942 		if (tp->pci_fn > 1)
16943 			mac_offset += 0x18c;
16944 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16945 		mac_offset = 0x10;
16946 
16947 	/* First try to get it from MAC address mailbox. */
16948 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
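	/* 0x484b is ASCII "HK", the signature the bootcode leaves in the
	 * upper half of the mailbox word when a valid address is present.
	 */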
16949 	if ((hi >> 16) == 0x484b) {
16950 		addr[0] = (hi >>  8) & 0xff;
16951 		addr[1] = (hi >>  0) & 0xff;
16952 
16953 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16954 		addr[2] = (lo >> 24) & 0xff;
16955 		addr[3] = (lo >> 16) & 0xff;
16956 		addr[4] = (lo >>  8) & 0xff;
16957 		addr[5] = (lo >>  0) & 0xff;
16958 
16959 		/* Some old bootcode may report a 0 MAC address in SRAM */
16960 		addr_ok = is_valid_ether_addr(addr);
16961 	}
16962 	if (!addr_ok) {
16963 		/* Next, try NVRAM. */
16964 		if (!tg3_flag(tp, NO_NVRAM) &&
16965 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16966 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16967 			memcpy(&addr[0], ((char *)&hi) + 2, 2);
16968 			memcpy(&addr[2], (char *)&lo, sizeof(lo));
16969 		}
16970 		/* Finally just fetch it out of the MAC control regs. */
16971 		else {
16972 			hi = tr32(MAC_ADDR_0_HIGH);
16973 			lo = tr32(MAC_ADDR_0_LOW);
16974 
16975 			addr[5] = lo & 0xff;
16976 			addr[4] = (lo >> 8) & 0xff;
16977 			addr[3] = (lo >> 16) & 0xff;
16978 			addr[2] = (lo >> 24) & 0xff;
16979 			addr[1] = hi & 0xff;
16980 			addr[0] = (hi >> 8) & 0xff;
16981 		}
16982 	}
16983 
16984 	if (!is_valid_ether_addr(addr))
16985 		return -EINVAL;
16986 	return 0;
16987 }
16988 
16989 #define BOUNDARY_SINGLE_CACHELINE	1
16990 #define BOUNDARY_MULTI_CACHELINE	2
16991 
16992 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16993 {
16994 	int cacheline_size;
16995 	u8 byte;
16996 	int goal;
16997 
16998 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
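	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; zero means the
	 * register was never programmed, so fall back to 1024 bytes.
	 */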
16999 	if (byte == 0)
17000 		cacheline_size = 1024;
17001 	else
17002 		cacheline_size = (int) byte * 4;
17003 
17004 	/* On 5703 and later chips, the boundary bits have no
17005 	 * effect.
17006 	 */
17007 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17008 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17009 	    !tg3_flag(tp, PCI_EXPRESS))
17010 		goto out;
17011 
17012 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17013 	goal = BOUNDARY_MULTI_CACHELINE;
17014 #else
17015 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17016 	goal = BOUNDARY_SINGLE_CACHELINE;
17017 #else
17018 	goal = 0;
17019 #endif
17020 #endif
17021 
17022 	if (tg3_flag(tp, 57765_PLUS)) {
17023 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17024 		goto out;
17025 	}
17026 
17027 	if (!goal)
17028 		goto out;
17029 
17030 	/* PCI controllers on most RISC systems tend to disconnect
17031 	 * when a device tries to burst across a cache-line boundary.
17032 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17033 	 *
17034 	 * Unfortunately, for PCI-E there are only limited
17035 	 * write-side controls for this, and thus for reads
17036 	 * we will still get the disconnects.  We'll also waste
17037 	 * these PCI cycles for both read and write for chips
17038 	 * other than 5700 and 5701 which do not implement the
17039 	 * boundary bits.
17040 	 */
17041 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17042 		switch (cacheline_size) {
17043 		case 16:
17044 		case 32:
17045 		case 64:
17046 		case 128:
17047 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17048 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17049 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17050 			} else {
17051 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17052 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17053 			}
17054 			break;
17055 
17056 		case 256:
17057 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17058 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17059 			break;
17060 
17061 		default:
17062 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17063 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17064 			break;
17065 		}
17066 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17067 		switch (cacheline_size) {
17068 		case 16:
17069 		case 32:
17070 		case 64:
17071 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17072 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17073 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17074 				break;
17075 			}
17076 			fallthrough;
17077 		case 128:
17078 		default:
17079 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17080 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17081 			break;
17082 		}
17083 	} else {
17084 		switch (cacheline_size) {
17085 		case 16:
17086 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17087 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17088 					DMA_RWCTRL_WRITE_BNDRY_16);
17089 				break;
17090 			}
17091 			fallthrough;
17092 		case 32:
17093 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17094 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17095 					DMA_RWCTRL_WRITE_BNDRY_32);
17096 				break;
17097 			}
17098 			fallthrough;
17099 		case 64:
17100 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17101 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17102 					DMA_RWCTRL_WRITE_BNDRY_64);
17103 				break;
17104 			}
17105 			fallthrough;
17106 		case 128:
17107 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17108 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17109 					DMA_RWCTRL_WRITE_BNDRY_128);
17110 				break;
17111 			}
17112 			fallthrough;
17113 		case 256:
17114 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17115 				DMA_RWCTRL_WRITE_BNDRY_256);
17116 			break;
17117 		case 512:
17118 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17119 				DMA_RWCTRL_WRITE_BNDRY_512);
17120 			break;
17121 		case 1024:
17122 		default:
17123 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17124 				DMA_RWCTRL_WRITE_BNDRY_1024);
17125 			break;
17126 		}
17127 	}
17128 
17129 out:
17130 	return val;
17131 }
17132 
17133 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17134 			   int size, bool to_device)
17135 {
17136 	struct tg3_internal_buffer_desc test_desc;
17137 	u32 sram_dma_descs;
17138 	int i, ret;
17139 
17140 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17141 
17142 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17143 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17144 	tw32(RDMAC_STATUS, 0);
17145 	tw32(WDMAC_STATUS, 0);
17146 
17147 	tw32(BUFMGR_MODE, 0);
17148 	tw32(FTQ_RESET, 0);
17149 
17150 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17151 	test_desc.addr_lo = buf_dma & 0xffffffff;
17152 	test_desc.nic_mbuf = 0x00002100;
17153 	test_desc.len = size;
17154 
17155 	/*
17156 	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17157 	 * the *second* time the tg3 driver was getting loaded after an
17158 	 * initial scan.
17159 	 *
17160 	 * Broadcom tells me:
17161 	 *   ...the DMA engine is connected to the GRC block and a DMA
17162 	 *   reset may affect the GRC block in some unpredictable way...
17163 	 *   The behavior of resets to individual blocks has not been tested.
17164 	 *
17165 	 * Broadcom noted the GRC reset will also reset all sub-components.
17166 	 */
17167 	if (to_device) {
17168 		test_desc.cqid_sqid = (13 << 8) | 2;
17169 
17170 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17171 		udelay(40);
17172 	} else {
17173 		test_desc.cqid_sqid = (16 << 8) | 7;
17174 
17175 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17176 		udelay(40);
17177 	}
17178 	test_desc.flags = 0x00000005;
17179 
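	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI config-space memory window (base + data regs).
	 */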
17180 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17181 		u32 val;
17182 
17183 		val = *(((u32 *)&test_desc) + i);
17184 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17185 				       sram_dma_descs + (i * sizeof(u32)));
17186 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17187 	}
17188 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17189 
17190 	if (to_device)
17191 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17192 	else
17193 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17194 
17195 	ret = -ENODEV;
17196 	for (i = 0; i < 40; i++) {
17197 		u32 val;
17198 
17199 		if (to_device)
17200 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17201 		else
17202 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17203 		if ((val & 0xffff) == sram_dma_descs) {
17204 			ret = 0;
17205 			break;
17206 		}
17207 
17208 		udelay(100);
17209 	}
17210 
17211 	return ret;
17212 }
17213 
17214 #define TEST_BUFFER_SIZE	0x2000
17215 
17216 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17217 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17218 	{ },
17219 };
17220 
17221 static int tg3_test_dma(struct tg3 *tp)
17222 {
17223 	dma_addr_t buf_dma;
17224 	u32 *buf, saved_dma_rwctrl;
17225 	int ret = 0;
17226 
17227 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17228 				 &buf_dma, GFP_KERNEL);
17229 	if (!buf) {
17230 		ret = -ENOMEM;
17231 		goto out_nofree;
17232 	}
17233 
17234 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17235 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17236 
17237 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17238 
17239 	if (tg3_flag(tp, 57765_PLUS))
17240 		goto out;
17241 
17242 	if (tg3_flag(tp, PCI_EXPRESS)) {
17243 		/* DMA read watermark not used on PCIE */
17244 		tp->dma_rwctrl |= 0x00180000;
17245 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17246 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17247 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17248 			tp->dma_rwctrl |= 0x003f0000;
17249 		else
17250 			tp->dma_rwctrl |= 0x003f000f;
17251 	} else {
17252 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17253 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17254 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17255 			u32 read_water = 0x7;
17256 
17257 			/* If the 5704 is behind the EPB bridge, we can
17258 			 * do the less restrictive ONE_DMA workaround for
17259 			 * better performance.
17260 			 */
17261 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17262 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17263 				tp->dma_rwctrl |= 0x8000;
17264 			else if (ccval == 0x6 || ccval == 0x7)
17265 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17266 
17267 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17268 				read_water = 4;
17269 			/* Set bit 23 to enable PCIX hw bug fix */
17270 			tp->dma_rwctrl |=
17271 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17272 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17273 				(1 << 23);
17274 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17275 			/* 5780 always in PCIX mode */
17276 			tp->dma_rwctrl |= 0x00144000;
17277 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17278 			/* 5714 always in PCIX mode */
17279 			tp->dma_rwctrl |= 0x00148000;
17280 		} else {
17281 			tp->dma_rwctrl |= 0x001b000f;
17282 		}
17283 	}
17284 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17285 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17286 
17287 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17288 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17289 		tp->dma_rwctrl &= 0xfffffff0;
17290 
17291 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17292 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17293 		/* Remove this if it causes problems for some boards. */
17294 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17295 
17296 		/* On 5700/5701 chips, we need to set this bit.
17297 		 * Otherwise the chip will issue cacheline transactions
17298 		 * to streamable DMA memory with not all the byte
17299 		 * enables turned on.  This is an error on several
17300 		 * RISC PCI controllers, in particular sparc64.
17301 		 *
17302 		 * On 5703/5704 chips, this bit has been reassigned
17303 		 * a different meaning.  In particular, it is used
17304 		 * on those chips to enable a PCI-X workaround.
17305 		 */
17306 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17307 	}
17308 
17309 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17310 
17311 
17312 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17313 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17314 		goto out;
17315 
17316 	/* It is best to perform the DMA test with maximum write burst size
17317 	 * to expose the 5700/5701 write DMA bug.
17318 	 */
17319 	saved_dma_rwctrl = tp->dma_rwctrl;
17320 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17321 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17322 
17323 	while (1) {
17324 		u32 *p = buf, i;
17325 
17326 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17327 			p[i] = i;
17328 
17329 		/* Send the buffer to the chip. */
17330 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17331 		if (ret) {
17332 			dev_err(&tp->pdev->dev,
17333 				"%s: Buffer write failed. err = %d\n",
17334 				__func__, ret);
17335 			break;
17336 		}
17337 
17338 		/* Now read it back. */
17339 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17340 		if (ret) {
17341 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17342 				"err = %d\n", __func__, ret);
17343 			break;
17344 		}
17345 
17346 		/* Verify it. */
17347 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17348 			if (p[i] == i)
17349 				continue;
17350 
17351 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17352 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17353 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17354 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17355 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17356 				break;
17357 			} else {
17358 				dev_err(&tp->pdev->dev,
17359 					"%s: Buffer corrupted on read back! "
17360 					"(%d != %d)\n", __func__, p[i], i);
17361 				ret = -ENODEV;
17362 				goto out;
17363 			}
17364 		}
17365 
17366 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17367 			/* Success. */
17368 			ret = 0;
17369 			break;
17370 		}
17371 	}
17372 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17373 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17374 		/* DMA test passed without adjusting DMA boundary,
17375 		 * now look for chipsets that are known to expose the
17376 		 * DMA bug without failing the test.
17377 		 */
17378 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17379 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17380 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17381 		} else {
17382 			/* Safe to use the calculated DMA boundary. */
17383 			tp->dma_rwctrl = saved_dma_rwctrl;
17384 		}
17385 
17386 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17387 	}
17388 
17389 out:
17390 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17391 out_nofree:
17392 	return ret;
17393 }
17394 
17395 static void tg3_init_bufmgr_config(struct tg3 *tp)
17396 {
17397 	if (tg3_flag(tp, 57765_PLUS)) {
17398 		tp->bufmgr_config.mbuf_read_dma_low_water =
17399 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17400 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17401 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17402 		tp->bufmgr_config.mbuf_high_water =
17403 			DEFAULT_MB_HIGH_WATER_57765;
17404 
17405 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17406 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17407 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17408 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17409 		tp->bufmgr_config.mbuf_high_water_jumbo =
17410 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17411 	} else if (tg3_flag(tp, 5705_PLUS)) {
17412 		tp->bufmgr_config.mbuf_read_dma_low_water =
17413 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17414 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17415 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17416 		tp->bufmgr_config.mbuf_high_water =
17417 			DEFAULT_MB_HIGH_WATER_5705;
17418 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17419 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17420 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17421 			tp->bufmgr_config.mbuf_high_water =
17422 				DEFAULT_MB_HIGH_WATER_5906;
17423 		}
17424 
17425 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17426 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17427 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17428 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17429 		tp->bufmgr_config.mbuf_high_water_jumbo =
17430 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17431 	} else {
17432 		tp->bufmgr_config.mbuf_read_dma_low_water =
17433 			DEFAULT_MB_RDMA_LOW_WATER;
17434 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17435 			DEFAULT_MB_MACRX_LOW_WATER;
17436 		tp->bufmgr_config.mbuf_high_water =
17437 			DEFAULT_MB_HIGH_WATER;
17438 
17439 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17440 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17441 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17442 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17443 		tp->bufmgr_config.mbuf_high_water_jumbo =
17444 			DEFAULT_MB_HIGH_WATER_JUMBO;
17445 	}
17446 
17447 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17448 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17449 }
17450 
17451 static char *tg3_phy_string(struct tg3 *tp)
17452 {
17453 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17454 	case TG3_PHY_ID_BCM5400:	return "5400";
17455 	case TG3_PHY_ID_BCM5401:	return "5401";
17456 	case TG3_PHY_ID_BCM5411:	return "5411";
17457 	case TG3_PHY_ID_BCM5701:	return "5701";
17458 	case TG3_PHY_ID_BCM5703:	return "5703";
17459 	case TG3_PHY_ID_BCM5704:	return "5704";
17460 	case TG3_PHY_ID_BCM5705:	return "5705";
17461 	case TG3_PHY_ID_BCM5750:	return "5750";
17462 	case TG3_PHY_ID_BCM5752:	return "5752";
17463 	case TG3_PHY_ID_BCM5714:	return "5714";
17464 	case TG3_PHY_ID_BCM5780:	return "5780";
17465 	case TG3_PHY_ID_BCM5755:	return "5755";
17466 	case TG3_PHY_ID_BCM5787:	return "5787";
17467 	case TG3_PHY_ID_BCM5784:	return "5784";
17468 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17469 	case TG3_PHY_ID_BCM5906:	return "5906";
17470 	case TG3_PHY_ID_BCM5761:	return "5761";
17471 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17472 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17473 	case TG3_PHY_ID_BCM57765:	return "57765";
17474 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17475 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17476 	case TG3_PHY_ID_BCM5762:	return "5762C";
17477 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17478 	case 0:			return "serdes";
17479 	default:		return "unknown";
17480 	}
17481 }
17482 
17483 static char *tg3_bus_string(struct tg3 *tp, char *str)
17484 {
17485 	if (tg3_flag(tp, PCI_EXPRESS)) {
17486 		strcpy(str, "PCI Express");
17487 		return str;
17488 	} else if (tg3_flag(tp, PCIX_MODE)) {
17489 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17490 
17491 		strcpy(str, "PCIX:");
17492 
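		/* The low CLOCK_CTRL bits encode the PCI-X bus speed;
		 * the 5704 CIOBE board is treated as 133MHz regardless
		 * of the clock bits.
		 */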
17493 		if ((clock_ctrl == 7) ||
17494 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17495 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17496 			strcat(str, "133MHz");
17497 		else if (clock_ctrl == 0)
17498 			strcat(str, "33MHz");
17499 		else if (clock_ctrl == 2)
17500 			strcat(str, "50MHz");
17501 		else if (clock_ctrl == 4)
17502 			strcat(str, "66MHz");
17503 		else if (clock_ctrl == 6)
17504 			strcat(str, "100MHz");
17505 	} else {
17506 		strcpy(str, "PCI:");
17507 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17508 			strcat(str, "66MHz");
17509 		else
17510 			strcat(str, "33MHz");
17511 	}
17512 	if (tg3_flag(tp, PCI_32BIT))
17513 		strcat(str, ":32-bit");
17514 	else
17515 		strcat(str, ":64-bit");
17516 	return str;
17517 }
17518 
17519 static void tg3_init_coal(struct tg3 *tp)
17520 {
17521 	struct ethtool_coalesce *ec = &tp->coal;
17522 
17523 	memset(ec, 0, sizeof(*ec));
17524 	ec->cmd = ETHTOOL_GCOALESCE;
17525 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17526 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17527 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17528 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17529 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17530 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17531 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17532 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17533 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17534 
17535 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17536 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17537 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17538 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17539 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17540 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17541 	}
17542 
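	/* 5705 and newer chips do not use the per-interrupt coalescing
	 * ticks or the statistics block coalescing tick, so clear them.
	 */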
17543 	if (tg3_flag(tp, 5705_PLUS)) {
17544 		ec->rx_coalesce_usecs_irq = 0;
17545 		ec->tx_coalesce_usecs_irq = 0;
17546 		ec->stats_block_coalesce_usecs = 0;
17547 	}
17548 }
17549 
17550 static int tg3_init_one(struct pci_dev *pdev,
17551 				  const struct pci_device_id *ent)
17552 {
17553 	struct net_device *dev;
17554 	struct tg3 *tp;
17555 	int i, err;
17556 	u32 sndmbx, rcvmbx, intmbx;
17557 	char str[40];
17558 	u64 dma_mask, persist_dma_mask;
17559 	netdev_features_t features = 0;
17560 	u8 addr[ETH_ALEN] __aligned(2);
17561 
17562 	err = pci_enable_device(pdev);
17563 	if (err) {
17564 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17565 		return err;
17566 	}
17567 
17568 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17569 	if (err) {
17570 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17571 		goto err_out_disable_pdev;
17572 	}
17573 
17574 	pci_set_master(pdev);
17575 
17576 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17577 	if (!dev) {
17578 		err = -ENOMEM;
17579 		goto err_out_free_res;
17580 	}
17581 
17582 	SET_NETDEV_DEV(dev, &pdev->dev);
17583 
17584 	tp = netdev_priv(dev);
17585 	tp->pdev = pdev;
17586 	tp->dev = dev;
17587 	tp->rx_mode = TG3_DEF_RX_MODE;
17588 	tp->tx_mode = TG3_DEF_TX_MODE;
17589 	tp->irq_sync = 1;
17590 	tp->pcierr_recovery = false;
17591 
17592 	if (tg3_debug > 0)
17593 		tp->msg_enable = tg3_debug;
17594 	else
17595 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17596 
17597 	if (pdev_is_ssb_gige_core(pdev)) {
17598 		tg3_flag_set(tp, IS_SSB_CORE);
17599 		if (ssb_gige_must_flush_posted_writes(pdev))
17600 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17601 		if (ssb_gige_one_dma_at_once(pdev))
17602 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17603 		if (ssb_gige_have_roboswitch(pdev)) {
17604 			tg3_flag_set(tp, USE_PHYLIB);
17605 			tg3_flag_set(tp, ROBOSWITCH);
17606 		}
17607 		if (ssb_gige_is_rgmii(pdev))
17608 			tg3_flag_set(tp, RGMII_MODE);
17609 	}
17610 
17611 	/* The word/byte swap controls here control register access byte
17612 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17613 	 * setting below.
17614 	 */
17615 	tp->misc_host_ctrl =
17616 		MISC_HOST_CTRL_MASK_PCI_INT |
17617 		MISC_HOST_CTRL_WORD_SWAP |
17618 		MISC_HOST_CTRL_INDIR_ACCESS |
17619 		MISC_HOST_CTRL_PCISTATE_RW;
17620 
17621 	/* The NONFRM (non-frame) byte/word swap controls take effect
17622 	 * on descriptor entries, anything which isn't packet data.
17623 	 *
17624 	 * The StrongARM chips on the board (one for tx, one for rx)
17625 	 * are running in big-endian mode.
17626 	 */
17627 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17628 			GRC_MODE_WSWAP_NONFRM_DATA);
17629 #ifdef __BIG_ENDIAN
17630 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17631 #endif
17632 	spin_lock_init(&tp->lock);
17633 	spin_lock_init(&tp->indirect_lock);
17634 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17635 
17636 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17637 	if (!tp->regs) {
17638 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17639 		err = -ENOMEM;
17640 		goto err_out_free_dev;
17641 	}
17642 
17643 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17644 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17645 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17646 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17647 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17648 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17649 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17650 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17651 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17652 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17653 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17654 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17655 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17656 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17657 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17658 		tg3_flag_set(tp, ENABLE_APE);
17659 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17660 		if (!tp->aperegs) {
17661 			dev_err(&pdev->dev,
17662 				"Cannot map APE registers, aborting\n");
17663 			err = -ENOMEM;
17664 			goto err_out_iounmap;
17665 		}
17666 	}
17667 
17668 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17669 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17670 
17671 	dev->ethtool_ops = &tg3_ethtool_ops;
17672 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17673 	dev->netdev_ops = &tg3_netdev_ops;
17674 	dev->irq = pdev->irq;
17675 
17676 	err = tg3_get_invariants(tp, ent);
17677 	if (err) {
17678 		dev_err(&pdev->dev,
17679 			"Problem fetching invariants of chip, aborting\n");
17680 		goto err_out_apeunmap;
17681 	}
17682 
17683 	/* The EPB bridge inside the 5714, 5715, and 5780, and any
17684 	 * device behind the EPB, cannot support DMA addresses > 40 bits.
17685 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17686 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17687 	 * do DMA address check in tg3_start_xmit().
17688 	 */
17689 	if (tg3_flag(tp, IS_5788))
17690 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17691 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17692 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17693 #ifdef CONFIG_HIGHMEM
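		/* Highmem pages can reside above the 40-bit limit, so
		 * advertise a 64-bit streaming mask; the 40-bit limit is
		 * enforced per-packet in tg3_start_xmit() as noted above.
		 */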
17694 		dma_mask = DMA_BIT_MASK(64);
17695 #endif
17696 	} else
17697 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17698 
17699 	/* Configure DMA attributes. */
17700 	if (dma_mask > DMA_BIT_MASK(32)) {
17701 		err = dma_set_mask(&pdev->dev, dma_mask);
17702 		if (!err) {
17703 			features |= NETIF_F_HIGHDMA;
17704 			err = dma_set_coherent_mask(&pdev->dev,
17705 						    persist_dma_mask);
17706 			if (err < 0) {
17707 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17708 					"DMA for consistent allocations\n");
17709 				goto err_out_apeunmap;
17710 			}
17711 		}
17712 	}
17713 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17714 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17715 		if (err) {
17716 			dev_err(&pdev->dev,
17717 				"No usable DMA configuration, aborting\n");
17718 			goto err_out_apeunmap;
17719 		}
17720 	}
17721 
17722 	tg3_init_bufmgr_config(tp);
17723 
17724 	/* 5700 B0 chips do not support checksumming correctly due
17725 	 * to hardware bugs.
17726 	 */
17727 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17728 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17729 
17730 		if (tg3_flag(tp, 5755_PLUS))
17731 			features |= NETIF_F_IPV6_CSUM;
17732 	}
17733 
17734 	/* TSO is on by default on chips that support hardware TSO.
17735 	 * Firmware TSO on older chips gives lower performance, so it
17736 	 * is off by default, but can be enabled using ethtool.
17737 	 */
17738 	if ((tg3_flag(tp, HW_TSO_1) ||
17739 	     tg3_flag(tp, HW_TSO_2) ||
17740 	     tg3_flag(tp, HW_TSO_3)) &&
17741 	    (features & NETIF_F_IP_CSUM))
17742 		features |= NETIF_F_TSO;
17743 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17744 		if (features & NETIF_F_IPV6_CSUM)
17745 			features |= NETIF_F_TSO6;
17746 		if (tg3_flag(tp, HW_TSO_3) ||
17747 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17748 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17749 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17750 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17751 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17752 			features |= NETIF_F_TSO_ECN;
17753 	}
17754 
17755 	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17756 			 NETIF_F_HW_VLAN_CTAG_RX;
17757 	dev->vlan_features |= features;
17758 
17759 	/*
17760 	 * Add loopback capability only for a subset of devices that support
17761 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17762 	 * loopback for the remaining devices.
17763 	 */
17764 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17765 	    !tg3_flag(tp, CPMU_PRESENT))
17766 		/* Add the loopback capability */
17767 		features |= NETIF_F_LOOPBACK;
17768 
17769 	dev->hw_features |= features;
17770 	dev->priv_flags |= IFF_UNICAST_FLT;
17771 
17772 	/* MTU range: 60 - 9000 or 1500, depending on hardware */
17773 	dev->min_mtu = TG3_MIN_MTU;
17774 	dev->max_mtu = TG3_MAX_MTU(tp);
17775 
17776 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17777 	    !tg3_flag(tp, TSO_CAPABLE) &&
17778 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17779 		tg3_flag_set(tp, MAX_RXPEND_64);
17780 		tp->rx_pending = 63;
17781 	}
17782 
17783 	err = tg3_get_device_address(tp, addr);
17784 	if (err) {
17785 		dev_err(&pdev->dev,
17786 			"Could not obtain valid ethernet address, aborting\n");
17787 		goto err_out_apeunmap;
17788 	}
17789 	eth_hw_addr_set(dev, addr);
17790 
17791 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17792 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17793 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17794 	for (i = 0; i < tp->irq_max; i++) {
17795 		struct tg3_napi *tnapi = &tp->napi[i];
17796 
17797 		tnapi->tp = tp;
17798 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17799 
17800 		tnapi->int_mbox = intmbx;
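		/* Interrupt mailboxes 0 through 4 are spaced 8 bytes
		 * apart; the remaining vectors use 4-byte spacing.
		 */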
17801 		if (i <= 4)
17802 			intmbx += 0x8;
17803 		else
17804 			intmbx += 0x4;
17805 
17806 		tnapi->consmbox = rcvmbx;
17807 		tnapi->prodmbox = sndmbx;
17808 
17809 		if (i)
17810 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17811 		else
17812 			tnapi->coal_now = HOSTCC_MODE_NOW;
17813 
17814 		if (!tg3_flag(tp, SUPPORT_MSIX))
17815 			break;
17816 
17817 		/*
17818 		 * If we support MSIX, we'll be using RSS.  If we're using
17819 		 * RSS, the first vector only handles link interrupts and the
17820 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17821 	 * mailbox values for the next iteration.  The values we set up
17822 	 * above are still useful for single-vector mode.
17823 		 */
17824 		if (!i)
17825 			continue;
17826 
17827 		rcvmbx += 0x8;
17828 
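		/* Step the producer mailbox for the next vector.  The
		 * alternating -0x4/+0xc walk visits both 4-byte halves
		 * of each 64-bit mailbox slot in turn.
		 */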
17829 		if (sndmbx & 0x4)
17830 			sndmbx -= 0x4;
17831 		else
17832 			sndmbx += 0xc;
17833 	}
17834 
17835 	/*
17836 	 * Reset the chip in case the UNDI or EFI driver did not shut it
17837 	 * down.  The DMA self test will enable WDMAC and we'll see
17838 	 * (spurious) pending DMA on the PCI bus at that point.
17839 	 */
17840 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17841 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17842 		tg3_full_lock(tp, 0);
17843 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17844 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17845 		tg3_full_unlock(tp);
17846 	}
17847 
17848 	err = tg3_test_dma(tp);
17849 	if (err) {
17850 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17851 		goto err_out_apeunmap;
17852 	}
17853 
17854 	tg3_init_coal(tp);
17855 
17856 	pci_set_drvdata(pdev, dev);
17857 
17858 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17859 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17860 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17861 		tg3_flag_set(tp, PTP_CAPABLE);
17862 
17863 	tg3_timer_init(tp);
17864 
17865 	tg3_carrier_off(tp);
17866 
17867 	err = register_netdev(dev);
17868 	if (err) {
17869 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17870 		goto err_out_apeunmap;
17871 	}
17872 
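	/* PTP clock registration failure is not fatal; the driver simply
	 * runs without exposing a PTP clock device.
	 */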
17873 	if (tg3_flag(tp, PTP_CAPABLE)) {
17874 		tg3_ptp_init(tp);
17875 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17876 						   &tp->pdev->dev);
17877 		if (IS_ERR(tp->ptp_clock))
17878 			tp->ptp_clock = NULL;
17879 	}
17880 
17881 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17882 		    tp->board_part_number,
17883 		    tg3_chip_rev_id(tp),
17884 		    tg3_bus_string(tp, str),
17885 		    dev->dev_addr);
17886 
17887 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17888 		char *ethtype;
17889 
17890 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17891 			ethtype = "10/100Base-TX";
17892 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17893 			ethtype = "1000Base-SX";
17894 		else
17895 			ethtype = "10/100/1000Base-T";
17896 
17897 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17898 			    "(WireSpeed[%d], EEE[%d])\n",
17899 			    tg3_phy_string(tp), ethtype,
17900 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17901 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17902 	}
17903 
17904 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17905 		    (dev->features & NETIF_F_RXCSUM) != 0,
17906 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17907 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17908 		    tg3_flag(tp, ENABLE_ASF) != 0,
17909 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17910 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17911 		    tp->dma_rwctrl,
17912 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17913 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17914 
17915 	pci_save_state(pdev);
17916 
17917 	return 0;
17918 
17919 err_out_apeunmap:
17920 	if (tp->aperegs) {
17921 		iounmap(tp->aperegs);
17922 		tp->aperegs = NULL;
17923 	}
17924 
17925 err_out_iounmap:
17926 	if (tp->regs) {
17927 		iounmap(tp->regs);
17928 		tp->regs = NULL;
17929 	}
17930 
17931 err_out_free_dev:
17932 	free_netdev(dev);
17933 
17934 err_out_free_res:
17935 	pci_release_regions(pdev);
17936 
17937 err_out_disable_pdev:
17938 	if (pci_is_enabled(pdev))
17939 		pci_disable_device(pdev);
17940 	return err;
17941 }
17942 
17943 static void tg3_remove_one(struct pci_dev *pdev)
17944 {
17945 	struct net_device *dev = pci_get_drvdata(pdev);
17946 
17947 	if (dev) {
17948 		struct tg3 *tp = netdev_priv(dev);
17949 
17950 		tg3_ptp_fini(tp);
17951 
17952 		release_firmware(tp->fw);
17953 
17954 		tg3_reset_task_cancel(tp);
17955 
17956 		if (tg3_flag(tp, USE_PHYLIB)) {
17957 			tg3_phy_fini(tp);
17958 			tg3_mdio_fini(tp);
17959 		}
17960 
17961 		unregister_netdev(dev);
17962 		if (tp->aperegs) {
17963 			iounmap(tp->aperegs);
17964 			tp->aperegs = NULL;
17965 		}
17966 		if (tp->regs) {
17967 			iounmap(tp->regs);
17968 			tp->regs = NULL;
17969 		}
17970 		free_netdev(dev);
17971 		pci_release_regions(pdev);
17972 		pci_disable_device(pdev);
17973 	}
17974 }
17975 
17976 #ifdef CONFIG_PM_SLEEP
17977 static int tg3_suspend(struct device *device)
17978 {
17979 	struct net_device *dev = dev_get_drvdata(device);
17980 	struct tg3 *tp = netdev_priv(dev);
17981 	int err = 0;
17982 
17983 	rtnl_lock();
17984 
17985 	if (!netif_running(dev))
17986 		goto unlock;
17987 
17988 	tg3_reset_task_cancel(tp);
17989 	tg3_phy_stop(tp);
17990 	tg3_netif_stop(tp);
17991 
17992 	tg3_timer_stop(tp);
17993 
17994 	tg3_full_lock(tp, 1);
17995 	tg3_disable_ints(tp);
17996 	tg3_full_unlock(tp);
17997 
17998 	netif_device_detach(dev);
17999 
18000 	tg3_full_lock(tp, 0);
18001 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18002 	tg3_flag_clear(tp, INIT_COMPLETE);
18003 	tg3_full_unlock(tp);
18004 
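	/* If preparing for power-down fails, restart the hardware so a
	 * failed suspend leaves the interface in a usable state.
	 */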
18005 	err = tg3_power_down_prepare(tp);
18006 	if (err) {
18007 		int err2;
18008 
18009 		tg3_full_lock(tp, 0);
18010 
18011 		tg3_flag_set(tp, INIT_COMPLETE);
18012 		err2 = tg3_restart_hw(tp, true);
18013 		if (err2)
18014 			goto out;
18015 
18016 		tg3_timer_start(tp);
18017 
18018 		netif_device_attach(dev);
18019 		tg3_netif_start(tp);
18020 
18021 out:
18022 		tg3_full_unlock(tp);
18023 
18024 		if (!err2)
18025 			tg3_phy_start(tp);
18026 	}
18027 
18028 unlock:
18029 	rtnl_unlock();
18030 	return err;
18031 }
18032 
18033 static int tg3_resume(struct device *device)
18034 {
18035 	struct net_device *dev = dev_get_drvdata(device);
18036 	struct tg3 *tp = netdev_priv(dev);
18037 	int err = 0;
18038 
18039 	rtnl_lock();
18040 
18041 	if (!netif_running(dev))
18042 		goto unlock;
18043 
18044 	netif_device_attach(dev);
18045 
18046 	tg3_full_lock(tp, 0);
18047 
18048 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18049 
18050 	tg3_flag_set(tp, INIT_COMPLETE);
18051 	err = tg3_restart_hw(tp,
18052 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18053 	if (err)
18054 		goto out;
18055 
18056 	tg3_timer_start(tp);
18057 
18058 	tg3_netif_start(tp);
18059 
18060 out:
18061 	tg3_full_unlock(tp);
18062 
18063 	if (!err)
18064 		tg3_phy_start(tp);
18065 
18066 unlock:
18067 	rtnl_unlock();
18068 	return err;
18069 }
18070 #endif /* CONFIG_PM_SLEEP */
18071 
18072 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18073 
18074 static void tg3_shutdown(struct pci_dev *pdev)
18075 {
18076 	struct net_device *dev = pci_get_drvdata(pdev);
18077 	struct tg3 *tp = netdev_priv(dev);
18078 
18079 	tg3_reset_task_cancel(tp);
18080 
18081 	rtnl_lock();
18082 
18083 	netif_device_detach(dev);
18084 
18085 	if (netif_running(dev))
18086 		dev_close(dev);
18087 
18088 	tg3_power_down(tp);
18089 
18090 	rtnl_unlock();
18091 
18092 	pci_disable_device(pdev);
18093 }
18094 
18095 /**
18096  * tg3_io_error_detected - called when PCI error is detected
18097  * @pdev: Pointer to PCI device
18098  * @state: The current pci connection state
18099  *
18100  * This function is called after a PCI bus error affecting
18101  * this device has been detected.
18102  */
18103 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18104 					      pci_channel_state_t state)
18105 {
18106 	struct net_device *netdev = pci_get_drvdata(pdev);
18107 	struct tg3 *tp = netdev_priv(netdev);
18108 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18109 
18110 	netdev_info(netdev, "PCI I/O error detected\n");
18111 
18112 	rtnl_lock();
18113 
18114 	/* Could be a second call, or maybe we don't have a netdev yet */
18115 	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18116 		goto done;
18117 
18118 	/* We need not recover from a permanent error */
18119 	if (state == pci_channel_io_frozen)
18120 		tp->pcierr_recovery = true;
18121 
18122 	tg3_phy_stop(tp);
18123 
18124 	tg3_netif_stop(tp);
18125 
18126 	tg3_timer_stop(tp);
18127 
18128 	/* We want to make sure that the reset task doesn't run */
18129 	tg3_reset_task_cancel(tp);
18130 
18131 	netif_device_detach(netdev);
18132 
18133 	/* Clean up software state, even if MMIO is blocked */
18134 	tg3_full_lock(tp, 0);
18135 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18136 	tg3_full_unlock(tp);
18137 
18138 done:
18139 	if (state == pci_channel_io_perm_failure) {
18140 		if (netdev) {
18141 			tg3_napi_enable(tp);
18142 			dev_close(netdev);
18143 		}
18144 		err = PCI_ERS_RESULT_DISCONNECT;
18145 	} else {
18146 		pci_disable_device(pdev);
18147 	}
18148 
18149 	rtnl_unlock();
18150 
18151 	return err;
18152 }
18153 
18154 /**
18155  * tg3_io_slot_reset - called after the pci bus has been reset.
18156  * @pdev: Pointer to PCI device
18157  *
18158  * Restart the card from scratch, as if from a cold-boot.
18159  * At this point, the card has experienced a hard reset,
18160  * followed by fixups by BIOS, and has its config space
18161  * set up identically to what it was at cold boot.
18162  */
18163 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18164 {
18165 	struct net_device *netdev = pci_get_drvdata(pdev);
18166 	struct tg3 *tp = netdev_priv(netdev);
18167 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18168 	int err;
18169 
18170 	rtnl_lock();
18171 
18172 	if (pci_enable_device(pdev)) {
18173 		dev_err(&pdev->dev,
18174 			"Cannot re-enable PCI device after reset.\n");
18175 		goto done;
18176 	}
18177 
18178 	pci_set_master(pdev);
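	/* Restore the saved config space, then re-save it so a later
	 * recovery cycle starts from this known-good state.
	 */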
18179 	pci_restore_state(pdev);
18180 	pci_save_state(pdev);
18181 
18182 	if (!netdev || !netif_running(netdev)) {
18183 		rc = PCI_ERS_RESULT_RECOVERED;
18184 		goto done;
18185 	}
18186 
18187 	err = tg3_power_up(tp);
18188 	if (err)
18189 		goto done;
18190 
18191 	rc = PCI_ERS_RESULT_RECOVERED;
18192 
18193 done:
18194 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18195 		tg3_napi_enable(tp);
18196 		dev_close(netdev);
18197 	}
18198 	rtnl_unlock();
18199 
18200 	return rc;
18201 }
18202 
18203 /**
18204  * tg3_io_resume - called when traffic can start flowing again.
18205  * @pdev: Pointer to PCI device
18206  *
18207  * This callback is called when the error recovery driver tells
18208  * us that it's OK to resume normal operation.
18209  */
18210 static void tg3_io_resume(struct pci_dev *pdev)
18211 {
18212 	struct net_device *netdev = pci_get_drvdata(pdev);
18213 	struct tg3 *tp = netdev_priv(netdev);
18214 	int err;
18215 
18216 	rtnl_lock();
18217 
18218 	if (!netdev || !netif_running(netdev))
18219 		goto done;
18220 
18221 	tg3_full_lock(tp, 0);
18222 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18223 	tg3_flag_set(tp, INIT_COMPLETE);
18224 	err = tg3_restart_hw(tp, true);
18225 	if (err) {
18226 		tg3_full_unlock(tp);
18227 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
18228 		goto done;
18229 	}
18230 
18231 	netif_device_attach(netdev);
18232 
18233 	tg3_timer_start(tp);
18234 
18235 	tg3_netif_start(tp);
18236 
18237 	tg3_full_unlock(tp);
18238 
18239 	tg3_phy_start(tp);
18240 
18241 done:
18242 	tp->pcierr_recovery = false;
18243 	rtnl_unlock();
18244 }
18245 
18246 static const struct pci_error_handlers tg3_err_handler = {
18247 	.error_detected	= tg3_io_error_detected,
18248 	.slot_reset	= tg3_io_slot_reset,
18249 	.resume		= tg3_io_resume
18250 };
18251 
18252 static struct pci_driver tg3_driver = {
18253 	.name		= DRV_MODULE_NAME,
18254 	.id_table	= tg3_pci_tbl,
18255 	.probe		= tg3_init_one,
18256 	.remove		= tg3_remove_one,
18257 	.err_handler	= &tg3_err_handler,
18258 	.driver.pm	= &tg3_pm_ops,
18259 	.shutdown	= tg3_shutdown,
18260 };
18261 
18262 module_pci_driver(tg3_driver);
18263