/* bnx2.c: QLogic bnx2 network driver.
 *
 * Copyright (c) 2004-2014 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>

#if IS_ENABLED(CONFIG_CNIC)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0444);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}
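
/* Worked example of the arithmetic above (illustrative, assuming the
 * usual 4K page where BNX2_TX_DESC_CNT == 256 and BNX2_MAX_TX_DESC_CNT
 * == 255): with 16-bit prod/cons indices, tx_prod = 2 and tx_cons =
 * 0xffff give diff = 0xffff0003 in 32-bit arithmetic; masking with
 * 0xffff yields 3 descriptors in flight across the wrap.  The special
 * case where the masked diff equals BNX2_TX_DESC_CNT maps to
 * BNX2_MAX_TX_DESC_CNT because one index per page is never used for a
 * buffer descriptor.
 */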

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&bp->indirect_lock, flags);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&bp->indirect_lock, flags);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
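
/* Sketch of the indirect access pattern above: shared-memory words sit
 * behind the PCICFG register window, so each access is "write window
 * address, then read/write window data" under indirect_lock.  For
 * example, a firmware heartbeat (see bnx2_send_heart_beat() below)
 * could equivalently be written as:
 *
 *	bnx2_shmem_wr(bp, BNX2_DRV_PULSE_MB,
 *		      ++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
 *
 * which expands to an indirect write of bp->shmem_base + BNX2_DRV_PULSE_MB.
 */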

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	unsigned long flags;

	offset += cid_addr;
	spin_lock_irqsave(&bp->indirect_lock, flags);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
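
/* Note on the two paths above: the 5709 writes context memory through
 * the CTX_CTX_DATA/CTX_CTX_CTRL pair and polls WRITE_REQ (up to
 * 5 x 5 us) until the hardware clears it; older chips use the simple
 * CTX_DATA_ADR/CTX_DATA address/data pair with no completion polling.
 */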

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
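
/* Status-block layout assumed above: per-vector status blocks are laid
 * out back to back at BNX2_SBLK_MSIX_ALIGN_SIZE intervals, so vector n
 * lives at status_blk.msi + n * BNX2_SBLK_MSIX_ALIGN_SIZE.  With MSI-X,
 * CNIC is given the vector right after the bp->irq_nvecs network
 * vectors; otherwise it shares status block 0 with the default vector.
 */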

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!ops)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	} else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
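
/* Layout of the BNX2_EMAC_MDIO_COMM word built above, as implied by the
 * shifts and masks used in this file: bits 25:21 PHY address, bits
 * 20:16 register number, bits 15:0 data, plus the COMMAND_READ/WRITE,
 * DISEXT and START_BUSY control bits.  The transaction is then polled
 * for up to 50 x 10 us before giving up with -EBUSY.
 */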

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

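		/* Two-step ack: the first write updates the ack index
		 * with the interrupt still masked; the second write
		 * unmasks it (mirroring bnx2_disable_int() above).
		 */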
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (!txr->tx_buf_ring)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (!txr->tx_desc_ring)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
		if (!rxr->rx_buf_ring)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (!rxr->rx_desc_ring[j])
				return -ENOMEM;
		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring =
				vzalloc(array_size(SW_RXPG_RING_SIZE,
						   bp->rx_max_pg_ring));
			if (!rxr->rx_pg_ring)
				return -ENOMEM;
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (!rxr->rx_pg_desc_ring[j])
				return -ENOMEM;
		}
	}
	return 0;
}

static void
bnx2_free_stats_blk(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->status_blk) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bp->status_blk,
				  bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
}

static int
bnx2_alloc_stats_blk(struct net_device *dev)
{
	int status_blk_size;
	void *status_blk;
	struct bnx2 *bp = netdev_priv(dev);

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);
	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (!status_blk)
		return -ENOMEM;

	bp->status_blk = status_blk;
	bp->stats_blk = status_blk + status_blk_size;
	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;
}
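
/* Resulting layout of the single coherent allocation above (one DMA
 * mapping shared by both halves):
 *
 *	status_blk -> status block(s), L1-cache aligned
 *	              (BNX2_MAX_MSIX_HW_VEC slots when MSI-X capable)
 *	stats_blk  -> struct statistics_block, at status_blk_size bytes
 *	              into the same buffer
 */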

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}

	if (bnapi->status_blk.msi)
		bnapi->status_blk.msi = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, err;
	struct bnx2_napi *bnapi;

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = bp->status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (!bp->ctx_blk[i])
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

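			/* BMSR latches link-down events, so it is read
			 * twice; the second read reflects the current
			 * link state.
			 */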
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	} else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
		if (bp->duplex == DUPLEX_FULL)
			bp->flow_ctrl = bp->req_flow_ctrl;
		return;
	}

	if (bp->duplex != DUPLEX_FULL)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP)
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (remote_adv & ADVERTISE_PAUSE_ASYM)
				bp->flow_ctrl = FLOW_CTRL_RX;
		} else {
			if (remote_adv & ADVERTISE_PAUSE_CAP)
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM))
			bp->flow_ctrl = FLOW_CTRL_TX;
	}
}
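
/* Summary of the resolution implemented above (802.3 Table 28B-3),
 * after normalizing 1000X pause bits to PAUSE_CAP/PAUSE_ASYM:
 *
 *	local CAP,        remote CAP        -> FLOW_CTRL_TX | FLOW_CTRL_RX
 *	local CAP + ASYM, remote ASYM only  -> FLOW_CTRL_RX
 *	local ASYM only,  remote CAP + ASYM -> FLOW_CTRL_TX
 *	any other combination               -> no pause
 */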

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	if (!(bmcr & BMCR_ANENABLE))
		return 0;

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
		if (common & ADVERTISE_1000XFULL)
			bp->duplex = DUPLEX_FULL;
		else
			bp->duplex = DUPLEX_HALF;
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

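		/* MII_STAT1000 reports the link partner's 1000BASE-T
		 * abilities two bit positions above the corresponding
		 * MII_CTRL1000 advertisement bits, so shifting right by
		 * two lines the fields up for the ADVERTISE_1000FULL/
		 * ADVERTISE_1000HALF tests below.
		 */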
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		} else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		} else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			} else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			} else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			} else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			} else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	} else {
		if (bmcr & BMCR_SPEED100)
			bp->line_speed = SPEED_100;
		else
			bp->line_speed = SPEED_10;

		if (bmcr & BMCR_FULLDPLX)
			bp->duplex = DUPLEX_FULL;
		else
			bp->duplex = DUPLEX_HALF;
	}

	if (bp->link_up) {
		u32 ext_status;

		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			fallthrough;
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			fallthrough;
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	} else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		} else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	} else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT)
		return -EBUSY;

	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
			adv = ADVERTISE_1000XPAUSE;
		else
			adv = ADVERTISE_PAUSE_CAP;
	} else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
			adv = ADVERTISE_1000XPSE_ASYM;
		else
			adv = ADVERTISE_PAUSE_ASYM;
	} else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		else
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		} else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1870 
1871 #define ETHTOOL_ALL_FIBRE_SPEED						\
1872 	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1873 		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1874 		(ADVERTISED_1000baseT_Full)
1875 
1876 #define ETHTOOL_ALL_COPPER_SPEED					\
1877 	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1878 	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1879 	ADVERTISED_1000baseT_Full)
1880 
1881 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1882 	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1883 
1884 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1885 
1886 static void
1887 bnx2_set_default_remote_link(struct bnx2 *bp)
1888 {
1889 	u32 link;
1890 
1891 	if (bp->phy_port == PORT_TP)
1892 		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1893 	else
1894 		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1895 
1896 	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1897 		bp->req_line_speed = 0;
1898 		bp->autoneg |= AUTONEG_SPEED;
1899 		bp->advertising = ADVERTISED_Autoneg;
1900 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1901 			bp->advertising |= ADVERTISED_10baseT_Half;
1902 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1903 			bp->advertising |= ADVERTISED_10baseT_Full;
1904 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1905 			bp->advertising |= ADVERTISED_100baseT_Half;
1906 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1907 			bp->advertising |= ADVERTISED_100baseT_Full;
1908 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1909 			bp->advertising |= ADVERTISED_1000baseT_Full;
1910 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1911 			bp->advertising |= ADVERTISED_2500baseX_Full;
1912 	} else {
1913 		bp->autoneg = 0;
1914 		bp->advertising = 0;
1915 		bp->req_duplex = DUPLEX_FULL;
1916 		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1917 			bp->req_line_speed = SPEED_10;
1918 			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1919 				bp->req_duplex = DUPLEX_HALF;
1920 		}
1921 		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1922 			bp->req_line_speed = SPEED_100;
1923 			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1924 				bp->req_duplex = DUPLEX_HALF;
1925 		}
1926 		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1927 			bp->req_line_speed = SPEED_1000;
1928 		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1929 			bp->req_line_speed = SPEED_2500;
1930 	}
1931 }
1932 
1933 static void
1934 bnx2_set_default_link(struct bnx2 *bp)
1935 {
1936 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1937 		bnx2_set_default_remote_link(bp);
1938 		return;
1939 	}
1940 
1941 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1942 	bp->req_line_speed = 0;
1943 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1944 		u32 reg;
1945 
1946 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1947 
1948 		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1949 		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1950 		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1951 			bp->autoneg = 0;
1952 			bp->req_line_speed = bp->line_speed = SPEED_1000;
1953 			bp->req_duplex = DUPLEX_FULL;
1954 		}
1955 	} else
1956 		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1957 }
1958 
1959 static void
1960 bnx2_send_heart_beat(struct bnx2 *bp)
1961 {
1962 	u32 msg;
1963 	u32 addr;
1964 
1965 	spin_lock(&bp->indirect_lock);
1966 	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1967 	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
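	/* Indirect write: point the PCICFG register window at the pulse
	 * mailbox in shared memory, then push the sequence number through
	 * it.  indirect_lock serializes users of the shared window. */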
1968 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1969 	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1970 	spin_unlock(&bp->indirect_lock);
1971 }
1972 
1973 static void
1974 bnx2_remote_phy_event(struct bnx2 *bp)
1975 {
1976 	u32 msg;
1977 	u8 link_up = bp->link_up;
1978 	u8 old_port;
1979 
1980 	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1981 
1982 	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1983 		bnx2_send_heart_beat(bp);
1984 
1985 	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1986 
1987 	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1988 		bp->link_up = 0;
1989 	else {
1990 		u32 speed;
1991 
1992 		bp->link_up = 1;
1993 		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1994 		bp->duplex = DUPLEX_FULL;
1995 		switch (speed) {
1996 			case BNX2_LINK_STATUS_10HALF:
1997 				bp->duplex = DUPLEX_HALF;
1998 				fallthrough;
1999 			case BNX2_LINK_STATUS_10FULL:
2000 				bp->line_speed = SPEED_10;
2001 				break;
2002 			case BNX2_LINK_STATUS_100HALF:
2003 				bp->duplex = DUPLEX_HALF;
2004 				fallthrough;
2005 			case BNX2_LINK_STATUS_100BASE_T4:
2006 			case BNX2_LINK_STATUS_100FULL:
2007 				bp->line_speed = SPEED_100;
2008 				break;
2009 			case BNX2_LINK_STATUS_1000HALF:
2010 				bp->duplex = DUPLEX_HALF;
2011 				fallthrough;
2012 			case BNX2_LINK_STATUS_1000FULL:
2013 				bp->line_speed = SPEED_1000;
2014 				break;
2015 			case BNX2_LINK_STATUS_2500HALF:
2016 				bp->duplex = DUPLEX_HALF;
2017 				fallthrough;
2018 			case BNX2_LINK_STATUS_2500FULL:
2019 				bp->line_speed = SPEED_2500;
2020 				break;
2021 			default:
2022 				bp->line_speed = 0;
2023 				break;
2024 		}
2025 
2026 		bp->flow_ctrl = 0;
2027 		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2028 		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2029 			if (bp->duplex == DUPLEX_FULL)
2030 				bp->flow_ctrl = bp->req_flow_ctrl;
2031 		} else {
2032 			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2033 				bp->flow_ctrl |= FLOW_CTRL_TX;
2034 			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2035 				bp->flow_ctrl |= FLOW_CTRL_RX;
2036 		}
2037 
2038 		old_port = bp->phy_port;
2039 		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2040 			bp->phy_port = PORT_FIBRE;
2041 		else
2042 			bp->phy_port = PORT_TP;
2043 
2044 		if (old_port != bp->phy_port)
2045 			bnx2_set_default_link(bp);
2047 	}
2048 	if (bp->link_up != link_up)
2049 		bnx2_report_link(bp);
2050 
2051 	bnx2_set_mac_link(bp);
2052 }
2053 
2054 static int
2055 bnx2_set_remote_link(struct bnx2 *bp)
2056 {
2057 	u32 evt_code;
2058 
2059 	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2060 	switch (evt_code) {
2061 		case BNX2_FW_EVT_CODE_LINK_EVENT:
2062 			bnx2_remote_phy_event(bp);
2063 			break;
2064 		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2065 		default:
2066 			bnx2_send_heart_beat(bp);
2067 			break;
2068 	}
2069 	return 0;
2070 }
2071 
2072 static int
2073 bnx2_setup_copper_phy(struct bnx2 *bp)
2074 __releases(&bp->phy_lock)
2075 __acquires(&bp->phy_lock)
2076 {
2077 	u32 bmcr, adv_reg, new_adv = 0;
2078 	u32 new_bmcr;
2079 
2080 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2081 
2082 	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2083 	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2084 		    ADVERTISE_PAUSE_ASYM);
2085 
2086 	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2087 
2088 	if (bp->autoneg & AUTONEG_SPEED) {
2089 		u32 adv1000_reg;
2090 		u32 new_adv1000 = 0;
2091 
2092 		new_adv |= bnx2_phy_get_pause_adv(bp);
2093 
2094 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2095 		adv1000_reg &= PHY_ALL_1000_SPEED;
2096 
2097 		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2098 		if ((adv1000_reg != new_adv1000) ||
2099 			(adv_reg != new_adv) ||
2100 			((bmcr & BMCR_ANENABLE) == 0)) {
2101 
2102 			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2103 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2104 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2105 				BMCR_ANENABLE);
2106 		} else if (bp->link_up) {
2108 			/* Flow ctrl may have changed from auto to forced
2109 			 * or vice-versa. */
2110 
2111 			bnx2_resolve_flow_ctrl(bp);
2112 			bnx2_set_mac_link(bp);
2113 		}
2114 		return 0;
2115 	}
2116 
2117 	/* advertise nothing when forcing speed */
2118 	if (adv_reg != new_adv)
2119 		bnx2_write_phy(bp, bp->mii_adv, new_adv);
2120 
2121 	new_bmcr = 0;
2122 	if (bp->req_line_speed == SPEED_100) {
2123 		new_bmcr |= BMCR_SPEED100;
2124 	}
2125 	if (bp->req_duplex == DUPLEX_FULL) {
2126 		new_bmcr |= BMCR_FULLDPLX;
2127 	}
2128 	if (new_bmcr != bmcr) {
2129 		u32 bmsr;
2130 
2131 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2132 		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2133 
2134 		if (bmsr & BMSR_LSTATUS) {
2135 			/* Force link down */
2136 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2137 			spin_unlock_bh(&bp->phy_lock);
2138 			msleep(50);
2139 			spin_lock_bh(&bp->phy_lock);
2140 
2141 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2142 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2143 		}
2144 
2145 		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2146 
2147 		/* Normally, the new speed is set up after the link has
2148 		 * gone down and up again. In some cases, link will not go
2149 		 * down so we need to set up the new speed here.
2150 		 */
2151 		if (bmsr & BMSR_LSTATUS) {
2152 			bp->line_speed = bp->req_line_speed;
2153 			bp->duplex = bp->req_duplex;
2154 			bnx2_resolve_flow_ctrl(bp);
2155 			bnx2_set_mac_link(bp);
2156 		}
2157 	} else {
2158 		bnx2_resolve_flow_ctrl(bp);
2159 		bnx2_set_mac_link(bp);
2160 	}
2161 	return 0;
2162 }
2163 
2164 static int
2165 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2166 __releases(&bp->phy_lock)
2167 __acquires(&bp->phy_lock)
2168 {
2169 	if (bp->loopback == MAC_LOOPBACK)
2170 		return 0;
2171 
2172 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2173 		return bnx2_setup_serdes_phy(bp, port);
2174 	} else {
2176 		return bnx2_setup_copper_phy(bp);
2177 	}
2178 }
2179 
2180 static int
2181 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2182 {
2183 	u32 val;
2184 
2185 	bp->mii_bmcr = MII_BMCR + 0x10;
2186 	bp->mii_bmsr = MII_BMSR + 0x10;
2187 	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2188 	bp->mii_adv = MII_ADVERTISE + 0x10;
2189 	bp->mii_lpa = MII_LPA + 0x10;
2190 	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2191 
2192 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2193 	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2194 
2195 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2196 	if (reset_phy)
2197 		bnx2_reset_phy(bp);
2198 
2199 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2200 
2201 	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2202 	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2203 	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2204 	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2205 
2206 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2207 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2208 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2209 		val |= BCM5708S_UP1_2G5;
2210 	else
2211 		val &= ~BCM5708S_UP1_2G5;
2212 	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2213 
2214 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2215 	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2216 	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2217 	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2218 
2219 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2220 
2221 	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2222 	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2223 	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2224 
2225 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2226 
2227 	return 0;
2228 }
2229 
2230 static int
2231 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2232 {
2233 	u32 val;
2234 
2235 	if (reset_phy)
2236 		bnx2_reset_phy(bp);
2237 
2238 	bp->mii_up1 = BCM5708S_UP1;
2239 
2240 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2241 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2242 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2243 
2244 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2245 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2246 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2247 
2248 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2249 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2250 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2251 
2252 	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2253 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2254 		val |= BCM5708S_UP1_2G5;
2255 		bnx2_write_phy(bp, BCM5708S_UP1, val);
2256 	}
2257 
2258 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2259 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2260 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2261 		/* increase tx signal amplitude */
2262 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2263 			       BCM5708S_BLK_ADDR_TX_MISC);
2264 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2265 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2266 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2267 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2268 	}
2269 
2270 	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2271 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2272 
2273 	if (val) {
2274 		u32 is_backplane;
2275 
2276 		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2277 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2278 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2279 				       BCM5708S_BLK_ADDR_TX_MISC);
2280 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2281 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2282 				       BCM5708S_BLK_ADDR_DIG);
2283 		}
2284 	}
2285 	return 0;
2286 }
2287 
2288 static int
2289 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2290 {
2291 	if (reset_phy)
2292 		bnx2_reset_phy(bp);
2293 
2294 	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2295 
2296 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2297 		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2298 
2299 	if (bp->dev->mtu > ETH_DATA_LEN) {
2300 		u32 val;
2301 
2302 		/* Set extended packet length bit */
2303 		bnx2_write_phy(bp, 0x18, 0x7);
2304 		bnx2_read_phy(bp, 0x18, &val);
2305 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2306 
2307 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2308 		bnx2_read_phy(bp, 0x1c, &val);
2309 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2310 	} else {
2312 		u32 val;
2313 
2314 		bnx2_write_phy(bp, 0x18, 0x7);
2315 		bnx2_read_phy(bp, 0x18, &val);
2316 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2317 
2318 		bnx2_write_phy(bp, 0x1c, 0x6c00);
2319 		bnx2_read_phy(bp, 0x1c, &val);
2320 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2321 	}
2322 
2323 	return 0;
2324 }
2325 
2326 static int
2327 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2328 {
2329 	u32 val;
2330 
2331 	if (reset_phy)
2332 		bnx2_reset_phy(bp);
2333 
2334 	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2335 		bnx2_write_phy(bp, 0x18, 0x0c00);
2336 		bnx2_write_phy(bp, 0x17, 0x000a);
2337 		bnx2_write_phy(bp, 0x15, 0x310b);
2338 		bnx2_write_phy(bp, 0x17, 0x201f);
2339 		bnx2_write_phy(bp, 0x15, 0x9506);
2340 		bnx2_write_phy(bp, 0x17, 0x401f);
2341 		bnx2_write_phy(bp, 0x15, 0x14e2);
2342 		bnx2_write_phy(bp, 0x18, 0x0400);
2343 	}
2344 
2345 	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2346 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2347 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2348 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2349 		val &= ~(1 << 8);
2350 		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2351 	}
2352 
2353 	if (bp->dev->mtu > ETH_DATA_LEN) {
2354 		/* Set extended packet length bit */
2355 		bnx2_write_phy(bp, 0x18, 0x7);
2356 		bnx2_read_phy(bp, 0x18, &val);
2357 		bnx2_write_phy(bp, 0x18, val | 0x4000);
2358 
2359 		bnx2_read_phy(bp, 0x10, &val);
2360 		bnx2_write_phy(bp, 0x10, val | 0x1);
2361 	} else {
2363 		bnx2_write_phy(bp, 0x18, 0x7);
2364 		bnx2_read_phy(bp, 0x18, &val);
2365 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2366 
2367 		bnx2_read_phy(bp, 0x10, &val);
2368 		bnx2_write_phy(bp, 0x10, val & ~0x1);
2369 	}
2370 
2371 	/* ethernet@wirespeed */
2372 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2373 	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2374 	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2375 
2376 	/* auto-mdix */
2377 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2378 		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;
2379 
2380 	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2381 	return 0;
2382 }
2383 
2385 static int
2386 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2387 __releases(&bp->phy_lock)
2388 __acquires(&bp->phy_lock)
2389 {
2390 	u32 val;
2391 	int rc = 0;
2392 
2393 	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2394 	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2395 
2396 	bp->mii_bmcr = MII_BMCR;
2397 	bp->mii_bmsr = MII_BMSR;
2398 	bp->mii_bmsr1 = MII_BMSR;
2399 	bp->mii_adv = MII_ADVERTISE;
2400 	bp->mii_lpa = MII_LPA;
2401 
2402 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2403 
2404 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2405 		goto setup_phy;
2406 
2407 	bnx2_read_phy(bp, MII_PHYSID1, &val);
2408 	bp->phy_id = val << 16;
2409 	bnx2_read_phy(bp, MII_PHYSID2, &val);
2410 	bp->phy_id |= val & 0xffff;
2411 
2412 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2413 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2414 			rc = bnx2_init_5706s_phy(bp, reset_phy);
2415 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2416 			rc = bnx2_init_5708s_phy(bp, reset_phy);
2417 		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2418 			rc = bnx2_init_5709s_phy(bp, reset_phy);
2419 	} else {
2421 		rc = bnx2_init_copper_phy(bp, reset_phy);
2422 	}
2423 
2424 setup_phy:
2425 	if (!rc)
2426 		rc = bnx2_setup_phy(bp, bp->phy_port);
2427 
2428 	return rc;
2429 }
2430 
2431 static int
2432 bnx2_set_mac_loopback(struct bnx2 *bp)
2433 {
2434 	u32 mac_mode;
2435 
2436 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2437 	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2438 	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2439 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2440 	bp->link_up = 1;
2441 	return 0;
2442 }
2443 
2444 static int bnx2_test_link(struct bnx2 *);
2445 
2446 static int
2447 bnx2_set_phy_loopback(struct bnx2 *bp)
2448 {
2449 	u32 mac_mode;
2450 	int rc, i;
2451 
2452 	spin_lock_bh(&bp->phy_lock);
2453 	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2454 			    BMCR_SPEED1000);
2455 	spin_unlock_bh(&bp->phy_lock);
2456 	if (rc)
2457 		return rc;
2458 
2459 	for (i = 0; i < 10; i++) {
2460 		if (bnx2_test_link(bp) == 0)
2461 			break;
2462 		msleep(100);
2463 	}
2464 
2465 	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2466 	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2467 		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2468 		      BNX2_EMAC_MODE_25G_MODE);
2469 
2470 	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2471 	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2472 	bp->link_up = 1;
2473 	return 0;
2474 }
2475 
2476 static void
2477 bnx2_dump_mcp_state(struct bnx2 *bp)
2478 {
2479 	struct net_device *dev = bp->dev;
2480 	u32 mcp_p0, mcp_p1;
2481 
2482 	netdev_err(dev, "<--- start MCP states dump --->\n");
2483 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2484 		mcp_p0 = BNX2_MCP_STATE_P0;
2485 		mcp_p1 = BNX2_MCP_STATE_P1;
2486 	} else {
2487 		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2488 		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2489 	}
2490 	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2491 		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2492 	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2493 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2494 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2495 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2496 	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2497 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2498 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2499 		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2500 	netdev_err(dev, "DEBUG: shmem states:\n");
2501 	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2502 		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2503 		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2504 		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2505 	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2506 	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2507 		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2508 		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2509 	pr_cont(" condition[%08x]\n",
2510 		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2511 	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2512 	DP_SHMEM_LINE(bp, 0x3cc);
2513 	DP_SHMEM_LINE(bp, 0x3dc);
2514 	DP_SHMEM_LINE(bp, 0x3ec);
2515 	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2516 	netdev_err(dev, "<--- end MCP states dump --->\n");
2517 }
2518 
2519 static int
2520 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2521 {
2522 	int i;
2523 	u32 val;
2524 
2525 	bp->fw_wr_seq++;
2526 	msg_data |= bp->fw_wr_seq;
2527 	bp->fw_last_msg = msg_data;
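	/* The sequence number embedded in msg_data lets us match the
	 * firmware's acknowledgement, echoed in BNX2_FW_MB, below. */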
2528 
2529 	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2530 
2531 	if (!ack)
2532 		return 0;
2533 
2534 	/* wait for an acknowledgement. */
2535 	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2536 		msleep(10);
2537 
2538 		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2539 
2540 		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2541 			break;
2542 	}
2543 	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2544 		return 0;
2545 
2546 	/* If we timed out, inform the firmware that this is the case. */
2547 	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2548 		msg_data &= ~BNX2_DRV_MSG_CODE;
2549 		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2550 
2551 		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2552 		if (!silent) {
2553 			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2554 			bnx2_dump_mcp_state(bp);
2555 		}
2556 
2557 		return -EBUSY;
2558 	}
2559 
2560 	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2561 		return -EIO;
2562 
2563 	return 0;
2564 }
2565 
2566 static int
2567 bnx2_init_5709_context(struct bnx2 *bp)
2568 {
2569 	int i, ret = 0;
2570 	u32 val;
2571 
2572 	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
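	/* The field at bits 16+ appears to encode the host page size as
	 * log2(size) - 8, e.g. 4 for 4K pages. */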
2573 	val |= (BNX2_PAGE_BITS - 8) << 16;
2574 	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2575 	for (i = 0; i < 10; i++) {
2576 		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2577 		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2578 			break;
2579 		udelay(2);
2580 	}
2581 	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2582 		return -EBUSY;
2583 
2584 	for (i = 0; i < bp->ctx_pages; i++) {
2585 		int j;
2586 
2587 		if (bp->ctx_blk[i])
2588 			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2589 		else
2590 			return -ENOMEM;
2591 
2592 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2593 			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2594 			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2595 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2596 			(u64) bp->ctx_blk_mapping[i] >> 32);
2597 		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2598 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2599 		for (j = 0; j < 10; j++) {
2600 
2601 			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2602 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2603 				break;
2604 			udelay(5);
2605 		}
2606 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2607 			ret = -EBUSY;
2608 			break;
2609 		}
2610 	}
2611 	return ret;
2612 }
2613 
2614 static void
2615 bnx2_init_context(struct bnx2 *bp)
2616 {
2617 	u32 vcid;
2618 
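	/* Walk all 96 context IDs and zero each one out through the
	 * context window. */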
2619 	vcid = 96;
2620 	while (vcid) {
2621 		u32 vcid_addr, pcid_addr, offset;
2622 		int i;
2623 
2624 		vcid--;
2625 
2626 		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2627 			u32 new_vcid;
2628 
2629 			vcid_addr = GET_PCID_ADDR(vcid);
2630 			if (vcid & 0x8) {
2631 				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2632 			} else {
2634 				new_vcid = vcid;
2635 			}
2636 			pcid_addr = GET_PCID_ADDR(new_vcid);
2637 		} else {
2639 			vcid_addr = GET_CID_ADDR(vcid);
2640 			pcid_addr = vcid_addr;
2641 		}
2642 
2643 		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2644 			vcid_addr += (i << PHY_CTX_SHIFT);
2645 			pcid_addr += (i << PHY_CTX_SHIFT);
2646 
2647 			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2648 			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2649 
2650 			/* Zero out the context. */
2651 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2652 				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2653 		}
2654 	}
2655 }
2656 
2657 static int
2658 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2659 {
2660 	u16 *good_mbuf;
2661 	u32 good_mbuf_cnt;
2662 	u32 val;
2663 
2664 	good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
2665 	if (!good_mbuf)
2666 		return -ENOMEM;
2667 
2668 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2669 		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2670 
2671 	good_mbuf_cnt = 0;
2672 
2673 	/* Allocate a bunch of mbufs and save the good ones in an array. */
2674 	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2675 	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2676 		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2677 				BNX2_RBUF_COMMAND_ALLOC_REQ);
2678 
2679 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2680 
2681 		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2682 
2683 		/* The addresses with Bit 9 set are bad memory blocks. */
2684 		if (!(val & (1 << 9))) {
2685 			good_mbuf[good_mbuf_cnt] = (u16) val;
2686 			good_mbuf_cnt++;
2687 		}
2688 
2689 		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2690 	}
2691 
2692 	/* Free the good ones back to the mbuf pool thus discarding
2693 	 * all the bad ones. */
2694 	while (good_mbuf_cnt) {
2695 		good_mbuf_cnt--;
2696 
2697 		val = good_mbuf[good_mbuf_cnt];
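		/* Re-encode the index into the firmware's free-buffer
		 * format: the same 9-bit value in both halves of the word,
		 * with bit 0 additionally forced on (presumably a valid
		 * flag). */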
2698 		val = (val << 9) | val | 1;
2699 
2700 		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2701 	}
2702 	kfree(good_mbuf);
2703 	return 0;
2704 }
2705 
2706 static void
2707 bnx2_set_mac_addr(struct bnx2 *bp, const u8 *mac_addr, u32 pos)
2708 {
2709 	u32 val;
2710 
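	/* The 6-byte MAC address is split across two registers: MATCH0
	 * holds the top 2 bytes, MATCH1 the remaining 4. */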
2711 	val = (mac_addr[0] << 8) | mac_addr[1];
2712 
2713 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2714 
2715 	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2716 		(mac_addr[4] << 8) | mac_addr[5];
2717 
2718 	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2719 }
2720 
2721 static inline int
2722 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2723 {
2724 	dma_addr_t mapping;
2725 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2726 	struct bnx2_rx_bd *rxbd =
2727 		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2728 	struct page *page = alloc_page(gfp);
2729 
2730 	if (!page)
2731 		return -ENOMEM;
2732 	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2733 			       DMA_FROM_DEVICE);
2734 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2735 		__free_page(page);
2736 		return -EIO;
2737 	}
2738 
2739 	rx_pg->page = page;
2740 	dma_unmap_addr_set(rx_pg, mapping, mapping);
2741 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2742 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2743 	return 0;
2744 }
2745 
2746 static void
2747 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2748 {
2749 	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2750 	struct page *page = rx_pg->page;
2751 
2752 	if (!page)
2753 		return;
2754 
2755 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2756 		       PAGE_SIZE, DMA_FROM_DEVICE);
2757 
2758 	__free_page(page);
2759 	rx_pg->page = NULL;
2760 }
2761 
2762 static inline int
2763 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2764 {
2765 	u8 *data;
2766 	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2767 	dma_addr_t mapping;
2768 	struct bnx2_rx_bd *rxbd =
2769 		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2770 
2771 	data = kmalloc(bp->rx_buf_size, gfp);
2772 	if (!data)
2773 		return -ENOMEM;
2774 
2775 	mapping = dma_map_single(&bp->pdev->dev,
2776 				 get_l2_fhdr(data),
2777 				 bp->rx_buf_use_size,
2778 				 DMA_FROM_DEVICE);
2779 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2780 		kfree(data);
2781 		return -EIO;
2782 	}
2783 
2784 	rx_buf->data = data;
2785 	dma_unmap_addr_set(rx_buf, mapping, mapping);
2786 
2787 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2788 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2789 
2790 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2791 
2792 	return 0;
2793 }
2794 
2795 static int
2796 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2797 {
2798 	struct status_block *sblk = bnapi->status_blk.msi;
2799 	u32 new_link_state, old_link_state;
2800 	int is_set = 1;
2801 
2802 	new_link_state = sblk->status_attn_bits & event;
2803 	old_link_state = sblk->status_attn_bits_ack & event;
2804 	if (new_link_state != old_link_state) {
2805 		if (new_link_state)
2806 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2807 		else
2808 			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2809 	} else
2810 		is_set = 0;
2811 
2812 	return is_set;
2813 }
2814 
2815 static void
2816 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2817 {
2818 	spin_lock(&bp->phy_lock);
2819 
2820 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2821 		bnx2_set_link(bp);
2822 	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2823 		bnx2_set_remote_link(bp);
2824 
2825 	spin_unlock(&bp->phy_lock);
2827 }
2828 
2829 static inline u16
2830 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2831 {
2832 	u16 cons;
2833 
2834 	cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2835 
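	/* The last BD of each ring page is a chain pointer to the next
	 * page, so advance past it when the hardware index lands there. */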
2836 	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2837 		cons++;
2838 	return cons;
2839 }
2840 
2841 static int
2842 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2843 {
2844 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2845 	u16 hw_cons, sw_cons, sw_ring_cons;
2846 	int tx_pkt = 0, index;
2847 	unsigned int tx_bytes = 0;
2848 	struct netdev_queue *txq;
2849 
2850 	index = (bnapi - bp->bnx2_napi);
2851 	txq = netdev_get_tx_queue(bp->dev, index);
2852 
2853 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2854 	sw_cons = txr->tx_cons;
2855 
2856 	while (sw_cons != hw_cons) {
2857 		struct bnx2_sw_tx_bd *tx_buf;
2858 		struct sk_buff *skb;
2859 		int i, last;
2860 
2861 		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2862 
2863 		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2864 		skb = tx_buf->skb;
2865 
2866 		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2867 		prefetch(&skb->end);
2868 
2869 		/* partial BD completions possible with TSO packets */
2870 		if (tx_buf->is_gso) {
2871 			u16 last_idx, last_ring_idx;
2872 
2873 			last_idx = sw_cons + tx_buf->nr_frags + 1;
2874 			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2875 			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2876 				last_idx++;
2877 			}
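			/* The increment above skips the chain BD when the
			 * packet wraps past the end of a ring page.  If the
			 * packet's last BD has not completed yet (s16 math
			 * handles index wrap), wait for more completions. */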
2878 			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2879 				break;
2880 			}
2881 		}
2882 
2883 		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2884 			skb_headlen(skb), DMA_TO_DEVICE);
2885 
2886 		tx_buf->skb = NULL;
2887 		last = tx_buf->nr_frags;
2888 
2889 		for (i = 0; i < last; i++) {
2890 			struct bnx2_sw_tx_bd *tx_buf;
2891 
2892 			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2893 
2894 			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2895 			dma_unmap_page(&bp->pdev->dev,
2896 				dma_unmap_addr(tx_buf, mapping),
2897 				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2898 				DMA_TO_DEVICE);
2899 		}
2900 
2901 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2902 
2903 		tx_bytes += skb->len;
2904 		dev_kfree_skb_any(skb);
2905 		tx_pkt++;
2906 		if (tx_pkt == budget)
2907 			break;
2908 
2909 		if (hw_cons == sw_cons)
2910 			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2911 	}
2912 
2913 	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2914 	txr->hw_tx_cons = hw_cons;
2915 	txr->tx_cons = sw_cons;
2916 
2917 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2918 	 * before checking for netif_tx_queue_stopped().  Without the
2919 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2920 	 * will miss it and cause the queue to be stopped forever.
2921 	 */
2922 	smp_mb();
2923 
2924 	if (unlikely(netif_tx_queue_stopped(txq)) &&
2925 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2926 		__netif_tx_lock(txq, smp_processor_id());
2927 		if ((netif_tx_queue_stopped(txq)) &&
2928 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2929 			netif_tx_wake_queue(txq);
2930 		__netif_tx_unlock(txq);
2931 	}
2932 
2933 	return tx_pkt;
2934 }
2935 
2936 static void
2937 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2938 			struct sk_buff *skb, int count)
2939 {
2940 	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2941 	struct bnx2_rx_bd *cons_bd, *prod_bd;
2942 	int i;
2943 	u16 hw_prod, prod;
2944 	u16 cons = rxr->rx_pg_cons;
2945 
2946 	cons_rx_pg = &rxr->rx_pg_ring[cons];
2947 
2948 	/* The caller was unable to allocate a new page to replace the
2949 	 * last one in the frags array, so we need to recycle that page
2950 	 * and then free the skb.
2951 	 */
2952 	if (skb) {
2953 		struct page *page;
2954 		struct skb_shared_info *shinfo;
2955 
2956 		shinfo = skb_shinfo(skb);
2957 		shinfo->nr_frags--;
2958 		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2959 		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2960 
2961 		cons_rx_pg->page = page;
2962 		dev_kfree_skb(skb);
2963 	}
2964 
2965 	hw_prod = rxr->rx_pg_prod;
2966 
2967 	for (i = 0; i < count; i++) {
2968 		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2969 
2970 		prod_rx_pg = &rxr->rx_pg_ring[prod];
2971 		cons_rx_pg = &rxr->rx_pg_ring[cons];
2972 		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2973 						[BNX2_RX_IDX(cons)];
2974 		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2975 						[BNX2_RX_IDX(prod)];
2976 
2977 		if (prod != cons) {
2978 			prod_rx_pg->page = cons_rx_pg->page;
2979 			cons_rx_pg->page = NULL;
2980 			dma_unmap_addr_set(prod_rx_pg, mapping,
2981 				dma_unmap_addr(cons_rx_pg, mapping));
2982 
2983 			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2984 			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2985 
2986 		}
2987 		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2988 		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2989 	}
2990 	rxr->rx_pg_prod = hw_prod;
2991 	rxr->rx_pg_cons = cons;
2992 }
2993 
2994 static inline void
2995 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2996 		   u8 *data, u16 cons, u16 prod)
2997 {
2998 	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2999 	struct bnx2_rx_bd *cons_bd, *prod_bd;
3000 
3001 	cons_rx_buf = &rxr->rx_buf_ring[cons];
3002 	prod_rx_buf = &rxr->rx_buf_ring[prod];
3003 
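	/* Hand the header area (the only part the CPU has touched) back
	 * to the device before the buffer is recycled. */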
3004 	dma_sync_single_for_device(&bp->pdev->dev,
3005 		dma_unmap_addr(cons_rx_buf, mapping),
3006 		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, DMA_FROM_DEVICE);
3007 
3008 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
3009 
3010 	prod_rx_buf->data = data;
3011 
3012 	if (cons == prod)
3013 		return;
3014 
3015 	dma_unmap_addr_set(prod_rx_buf, mapping,
3016 			dma_unmap_addr(cons_rx_buf, mapping));
3017 
3018 	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3019 	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3020 	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3021 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3022 }
3023 
3024 static struct sk_buff *
3025 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3026 	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3027 	    u32 ring_idx)
3028 {
3029 	int err;
3030 	u16 prod = ring_idx & 0xffff;
3031 	struct sk_buff *skb;
3032 
3033 	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3034 	if (unlikely(err)) {
3035 		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3036 error:
3037 		if (hdr_len) {
3038 			unsigned int raw_len = len + 4;
3039 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3040 
3041 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3042 		}
3043 		return NULL;
3044 	}
3045 
3046 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3047 			 DMA_FROM_DEVICE);
3048 	skb = build_skb(data, 0);
3049 	if (!skb) {
3050 		kfree(data);
3051 		goto error;
3052 	}
3053 	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3054 	if (hdr_len == 0) {
3055 		skb_put(skb, len);
3056 		return skb;
3057 	} else {
3058 		unsigned int i, frag_len, frag_size, pages;
3059 		struct bnx2_sw_pg *rx_pg;
3060 		u16 pg_cons = rxr->rx_pg_cons;
3061 		u16 pg_prod = rxr->rx_pg_prod;
3062 
3063 		frag_size = len + 4 - hdr_len;
3064 		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
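		/* frag_size still includes the 4-byte frame CRC; it is
		 * trimmed from the final fragment below. */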
3065 		skb_put(skb, hdr_len);
3066 
3067 		for (i = 0; i < pages; i++) {
3068 			dma_addr_t mapping_old;
3069 
3070 			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3071 			if (unlikely(frag_len <= 4)) {
3072 				unsigned int tail = 4 - frag_len;
3073 
3074 				rxr->rx_pg_cons = pg_cons;
3075 				rxr->rx_pg_prod = pg_prod;
3076 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3077 							pages - i);
3078 				skb->len -= tail;
3079 				if (i == 0) {
3080 					skb->tail -= tail;
3081 				} else {
3082 					skb_frag_t *frag =
3083 						&skb_shinfo(skb)->frags[i - 1];
3084 					skb_frag_size_sub(frag, tail);
3085 					skb->data_len -= tail;
3086 				}
3087 				return skb;
3088 			}
3089 			rx_pg = &rxr->rx_pg_ring[pg_cons];
3090 
3091 			/* Don't unmap yet.  If we're unable to allocate a new
3092 			 * page, we need to recycle the page and the DMA addr.
3093 			 */
3094 			mapping_old = dma_unmap_addr(rx_pg, mapping);
3095 			if (i == pages - 1)
3096 				frag_len -= 4;
3097 
3098 			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3099 			rx_pg->page = NULL;
3100 
3101 			err = bnx2_alloc_rx_page(bp, rxr,
3102 						 BNX2_RX_PG_RING_IDX(pg_prod),
3103 						 GFP_ATOMIC);
3104 			if (unlikely(err)) {
3105 				rxr->rx_pg_cons = pg_cons;
3106 				rxr->rx_pg_prod = pg_prod;
3107 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3108 							pages - i);
3109 				return NULL;
3110 			}
3111 
3112 			dma_unmap_page(&bp->pdev->dev, mapping_old,
3113 				       PAGE_SIZE, DMA_FROM_DEVICE);
3114 
3115 			frag_size -= frag_len;
3116 			skb->data_len += frag_len;
3117 			skb->truesize += PAGE_SIZE;
3118 			skb->len += frag_len;
3119 
3120 			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3121 			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3122 		}
3123 		rxr->rx_pg_prod = pg_prod;
3124 		rxr->rx_pg_cons = pg_cons;
3125 	}
3126 	return skb;
3127 }
3128 
3129 static inline u16
3130 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3131 {
3132 	u16 cons;
3133 
3134 	cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3135 
3136 	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3137 		cons++;
3138 	return cons;
3139 }
3140 
3141 static int
3142 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3143 {
3144 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3145 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3146 	struct l2_fhdr *rx_hdr;
3147 	int rx_pkt = 0, pg_ring_used = 0;
3148 
3149 	if (budget <= 0)
3150 		return rx_pkt;
3151 
3152 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3153 	sw_cons = rxr->rx_cons;
3154 	sw_prod = rxr->rx_prod;
3155 
3156 	/* Memory barrier necessary as speculative reads of the rx
3157 	 * buffer can be ahead of the index in the status block
3158 	 */
3159 	rmb();
3160 	while (sw_cons != hw_cons) {
3161 		unsigned int len, hdr_len;
3162 		u32 status;
3163 		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3164 		struct sk_buff *skb;
3165 		dma_addr_t dma_addr;
3166 		u8 *data;
3167 		u16 next_ring_idx;
3168 
3169 		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3170 		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3171 
3172 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3173 		data = rx_buf->data;
3174 		rx_buf->data = NULL;
3175 
3176 		rx_hdr = get_l2_fhdr(data);
3177 		prefetch(rx_hdr);
3178 
3179 		dma_addr = dma_unmap_addr(rx_buf, mapping);
3180 
3181 		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3182 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3183 			DMA_FROM_DEVICE);
3184 
3185 		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3186 		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3187 		prefetch(get_l2_fhdr(next_rx_buf->data));
3188 
3189 		len = rx_hdr->l2_fhdr_pkt_len;
3190 		status = rx_hdr->l2_fhdr_status;
3191 
3192 		hdr_len = 0;
3193 		if (status & L2_FHDR_STATUS_SPLIT) {
3194 			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3195 			pg_ring_used = 1;
3196 		} else if (len > bp->rx_jumbo_thresh) {
3197 			hdr_len = bp->rx_jumbo_thresh;
3198 			pg_ring_used = 1;
3199 		}
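		/* A non-zero hdr_len means only the first hdr_len bytes of
		 * the frame are in this buffer; the rest sits in the page
		 * ring. */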
3200 
3201 		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3202 				       L2_FHDR_ERRORS_PHY_DECODE |
3203 				       L2_FHDR_ERRORS_ALIGNMENT |
3204 				       L2_FHDR_ERRORS_TOO_SHORT |
3205 				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3206 
3207 			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3208 					  sw_ring_prod);
3209 			if (pg_ring_used) {
3210 				int pages;
3211 
3212 				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3213 
3214 				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3215 			}
3216 			goto next_rx;
3217 		}
3218 
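		/* Trim the 4-byte frame CRC included in the reported
		 * packet length. */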
3219 		len -= 4;
3220 
3221 		if (len <= bp->rx_copy_thresh) {
3222 			skb = netdev_alloc_skb(bp->dev, len + 6);
3223 			if (!skb) {
3224 				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3225 						  sw_ring_prod);
3226 				goto next_rx;
3227 			}
3228 
3229 			/* aligned copy */
3230 			memcpy(skb->data,
3231 			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3232 			       len + 6);
3233 			skb_reserve(skb, 6);
3234 			skb_put(skb, len);
3235 
3236 			bnx2_reuse_rx_data(bp, rxr, data,
3237 				sw_ring_cons, sw_ring_prod);
3238 
3239 		} else {
3240 			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3241 					  (sw_ring_cons << 16) | sw_ring_prod);
3242 			if (!skb)
3243 				goto next_rx;
3244 		}
3245 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3246 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3247 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3248 
3249 		skb->protocol = eth_type_trans(skb, bp->dev);
3250 
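		/* Drop frames longer than MTU + header unless they carry a
		 * VLAN tag (0x8100 is ETH_P_8021Q). */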
3251 		if (len > (bp->dev->mtu + ETH_HLEN) &&
3252 		    skb->protocol != htons(0x8100) &&
3253 		    skb->protocol != htons(ETH_P_8021AD)) {
3254 
3255 			dev_kfree_skb(skb);
3256 			goto next_rx;
3257 
3258 		}
3259 
3260 		skb_checksum_none_assert(skb);
3261 		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3262 			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3263 			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3264 
3265 			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3266 					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3267 				skb->ip_summed = CHECKSUM_UNNECESSARY;
3268 		}
3269 		if ((bp->dev->features & NETIF_F_RXHASH) &&
3270 		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3271 		     L2_FHDR_STATUS_USE_RXHASH))
3272 			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3273 				     PKT_HASH_TYPE_L3);
3274 
3275 		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3276 		napi_gro_receive(&bnapi->napi, skb);
3277 		rx_pkt++;
3278 
3279 next_rx:
3280 		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3281 		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3282 
3283 		if (rx_pkt == budget)
3284 			break;
3285 
3286 		/* Refresh hw_cons to see if there is new work */
3287 		if (sw_cons == hw_cons) {
3288 			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3289 			rmb();
3290 		}
3291 	}
3292 	rxr->rx_cons = sw_cons;
3293 	rxr->rx_prod = sw_prod;
3294 
3295 	if (pg_ring_used)
3296 		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3297 
3298 	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3299 
3300 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3301 
3302 	return rx_pkt;
3304 }
3305 
3306 /* MSI ISR - The only difference between this and the INTx ISR
3307  * is that the MSI interrupt is always serviced.
3308  */
3309 static irqreturn_t
3310 bnx2_msi(int irq, void *dev_instance)
3311 {
3312 	struct bnx2_napi *bnapi = dev_instance;
3313 	struct bnx2 *bp = bnapi->bp;
3314 
3315 	prefetch(bnapi->status_blk.msi);
3316 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3317 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3318 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3319 
3320 	/* Return here if interrupt is disabled. */
3321 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3322 		return IRQ_HANDLED;
3323 
3324 	napi_schedule(&bnapi->napi);
3325 
3326 	return IRQ_HANDLED;
3327 }
3328 
3329 static irqreturn_t
3330 bnx2_msi_1shot(int irq, void *dev_instance)
3331 {
3332 	struct bnx2_napi *bnapi = dev_instance;
3333 	struct bnx2 *bp = bnapi->bp;
3334 
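	/* In one-shot mode the chip disables further interrupts by itself
	 * after each message, so unlike bnx2_msi() no explicit mask write
	 * is needed here. */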
3335 	prefetch(bnapi->status_blk.msi);
3336 
3337 	/* Return here if interrupt is disabled. */
3338 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3339 		return IRQ_HANDLED;
3340 
3341 	napi_schedule(&bnapi->napi);
3342 
3343 	return IRQ_HANDLED;
3344 }
3345 
3346 static irqreturn_t
3347 bnx2_interrupt(int irq, void *dev_instance)
3348 {
3349 	struct bnx2_napi *bnapi = dev_instance;
3350 	struct bnx2 *bp = bnapi->bp;
3351 	struct status_block *sblk = bnapi->status_blk.msi;
3352 
3353 	/* When using INTx, it is possible for the interrupt to arrive
3354 	 * at the CPU before the status block posted prior to the
3355 	 * interrupt. Reading a register will flush the status block.
3356 	 * When using MSI, the MSI message will always complete after
3357 	 * the status block write.
3358 	 */
3359 	if ((sblk->status_idx == bnapi->last_status_idx) &&
3360 	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3361 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3362 		return IRQ_NONE;
3363 
3364 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3365 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3366 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3367 
3368 	/* Read back to deassert IRQ immediately to avoid too many
3369 	 * spurious interrupts.
3370 	 */
3371 	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3372 
3373 	/* Return here if interrupt is shared and is disabled. */
3374 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3375 		return IRQ_HANDLED;
3376 
3377 	if (napi_schedule_prep(&bnapi->napi)) {
3378 		bnapi->last_status_idx = sblk->status_idx;
3379 		__napi_schedule(&bnapi->napi);
3380 	}
3381 
3382 	return IRQ_HANDLED;
3383 }
3384 
3385 static inline int
3386 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3387 {
3388 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3389 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3390 
3391 	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3392 	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3393 		return 1;
3394 	return 0;
3395 }
3396 
3397 #define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3398 				 STATUS_ATTN_BITS_TIMER_ABORT)
3399 
3400 static inline int
3401 bnx2_has_work(struct bnx2_napi *bnapi)
3402 {
3403 	struct status_block *sblk = bnapi->status_blk.msi;
3404 
3405 	if (bnx2_has_fast_work(bnapi))
3406 		return 1;
3407 
3408 #ifdef BCM_CNIC
3409 	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3410 		return 1;
3411 #endif
3412 
3413 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3414 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3415 		return 1;
3416 
3417 	return 0;
3418 }
3419 
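/* Workaround for occasional lost MSIs: if work is pending but the status
 * index has not moved since the last idle check, toggle the MSI enable
 * bit and invoke the handler directly to restart processing. */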
3420 static void
3421 bnx2_chk_missed_msi(struct bnx2 *bp)
3422 {
3423 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3424 	u32 msi_ctrl;
3425 
3426 	if (bnx2_has_work(bnapi)) {
3427 		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3428 		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3429 			return;
3430 
3431 		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3432 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3433 				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3434 			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3435 			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3436 		}
3437 	}
3438 
3439 	bp->idle_chk_status_idx = bnapi->last_status_idx;
3440 }
3441 
3442 #ifdef BCM_CNIC
3443 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3444 {
3445 	struct cnic_ops *c_ops;
3446 
3447 	if (!bnapi->cnic_present)
3448 		return;
3449 
3450 	rcu_read_lock();
3451 	c_ops = rcu_dereference(bp->cnic_ops);
3452 	if (c_ops)
3453 		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3454 						      bnapi->status_blk.msi);
3455 	rcu_read_unlock();
3456 }
3457 #endif
3458 
3459 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3460 {
3461 	struct status_block *sblk = bnapi->status_blk.msi;
3462 	u32 status_attn_bits = sblk->status_attn_bits;
3463 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3464 
3465 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3466 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3467 
3468 		bnx2_phy_int(bp, bnapi);
3469 
3470 		/* This is needed to take care of transient status
3471 		 * during link changes.
3472 		 */
3473 		BNX2_WR(bp, BNX2_HC_COMMAND,
3474 			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3475 		BNX2_RD(bp, BNX2_HC_COMMAND);
3476 	}
3477 }
3478 
3479 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3480 			  int work_done, int budget)
3481 {
3482 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3483 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3484 
3485 	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3486 		bnx2_tx_int(bp, bnapi, 0);
3487 
3488 	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3489 		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3490 
3491 	return work_done;
3492 }
3493 
3494 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3495 {
3496 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3497 	struct bnx2 *bp = bnapi->bp;
3498 	int work_done = 0;
3499 	struct status_block_msix *sblk = bnapi->status_blk.msix;
3500 
3501 	while (1) {
3502 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3503 		if (unlikely(work_done >= budget))
3504 			break;
3505 
3506 		bnapi->last_status_idx = sblk->status_idx;
3507 		/* status idx must be read before checking for more work. */
3508 		rmb();
3509 		if (likely(!bnx2_has_fast_work(bnapi))) {
3510 
3511 			napi_complete_done(napi, work_done);
3512 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3513 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3514 				bnapi->last_status_idx);
3515 			break;
3516 		}
3517 	}
3518 	return work_done;
3519 }
3520 
3521 static int bnx2_poll(struct napi_struct *napi, int budget)
3522 {
3523 	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3524 	struct bnx2 *bp = bnapi->bp;
3525 	int work_done = 0;
3526 	struct status_block *sblk = bnapi->status_blk.msi;
3527 
3528 	while (1) {
3529 		bnx2_poll_link(bp, bnapi);
3530 
3531 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3532 
3533 #ifdef BCM_CNIC
3534 		bnx2_poll_cnic(bp, bnapi);
3535 #endif
3536 
3537 		/* bnapi->last_status_idx is used below to tell the hw how
3538 		 * much work has been processed, so we must read it before
3539 		 * checking for more work.
3540 		 */
3541 		bnapi->last_status_idx = sblk->status_idx;
3542 
3543 		if (unlikely(work_done >= budget))
3544 			break;
3545 
3546 		rmb();
3547 		if (likely(!bnx2_has_work(bnapi))) {
3548 			napi_complete_done(napi, work_done);
3549 			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3550 				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3551 					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3552 					bnapi->last_status_idx);
3553 				break;
3554 			}
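			/* INTx: update the index with interrupts still
			 * masked, then write again without the mask bit to
			 * re-enable them. */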
3555 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3556 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3557 				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3558 				bnapi->last_status_idx);
3559 
3560 			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3561 				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3562 				bnapi->last_status_idx);
3563 			break;
3564 		}
3565 	}
3566 
3567 	return work_done;
3568 }
3569 
3570 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3571  * from set_multicast.
3572  */
3573 static void
3574 bnx2_set_rx_mode(struct net_device *dev)
3575 {
3576 	struct bnx2 *bp = netdev_priv(dev);
3577 	u32 rx_mode, sort_mode;
3578 	struct netdev_hw_addr *ha;
3579 	int i;
3580 
3581 	if (!netif_running(dev))
3582 		return;
3583 
3584 	spin_lock_bh(&bp->phy_lock);
3585 
3586 	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3587 				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3588 	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3589 	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3590 	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3591 		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3592 	if (dev->flags & IFF_PROMISC) {
3593 		/* Promiscuous mode. */
3594 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3595 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3596 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3597 	} else if (dev->flags & IFF_ALLMULTI) {
3599 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3600 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3601 				0xffffffff);
3602 		}
3603 		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3604 	} else {
3606 		/* Accept one or more multicast(s). */
3607 		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3608 		u32 regidx;
3609 		u32 bit;
3610 		u32 crc;
3611 
3612 		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3613 
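		/* The low 8 bits of the little-endian CRC index a 256-bit
		 * hash table spread over eight 32-bit registers: bits 7:5
		 * select the register, bits 4:0 the bit within it. */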
3614 		netdev_for_each_mc_addr(ha, dev) {
3615 			crc = ether_crc_le(ETH_ALEN, ha->addr);
3616 			bit = crc & 0xff;
3617 			regidx = (bit & 0xe0) >> 5;
3618 			bit &= 0x1f;
3619 			mc_filter[regidx] |= (1 << bit);
3620 		}
3621 
3622 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3623 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3624 				mc_filter[i]);
3625 		}
3626 
3627 		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3628 	}
3629 
3630 	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3631 		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3632 		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3633 			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3634 	} else if (!(dev->flags & IFF_PROMISC)) {
3635 		/* Add all entries to the match filter list */
3636 		i = 0;
3637 		netdev_for_each_uc_addr(ha, dev) {
3638 			bnx2_set_mac_addr(bp, ha->addr,
3639 					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3640 			sort_mode |= (1 <<
3641 				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3642 			i++;
3643 		}
3645 	}
3646 
3647 	if (rx_mode != bp->rx_mode) {
3648 		bp->rx_mode = rx_mode;
3649 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3650 	}
3651 
3652 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3653 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3654 	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3655 
3656 	spin_unlock_bh(&bp->phy_lock);
3657 }
3658 
3659 static int
3660 check_fw_section(const struct firmware *fw,
3661 		 const struct bnx2_fw_file_section *section,
3662 		 u32 alignment, bool non_empty)
3663 {
3664 	u32 offset = be32_to_cpu(section->offset);
3665 	u32 len = be32_to_cpu(section->len);
3666 
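	/* A section is acceptable when its offset is dword-aligned and
	 * lies inside the firmware blob, and its length fits in the
	 * remaining bytes and is a multiple of the requested alignment
	 * (assumed to be a power of two, as it is checked with a mask).
	 */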
3667 	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3668 		return -EINVAL;
3669 	if ((non_empty && len == 0) || len > fw->size - offset ||
3670 	    len & (alignment - 1))
3671 		return -EINVAL;
3672 	return 0;
3673 }
3674 
3675 static int
3676 check_mips_fw_entry(const struct firmware *fw,
3677 		    const struct bnx2_mips_fw_file_entry *entry)
3678 {
3679 	if (check_fw_section(fw, &entry->text, 4, true) ||
3680 	    check_fw_section(fw, &entry->data, 4, false) ||
3681 	    check_fw_section(fw, &entry->rodata, 4, false))
3682 		return -EINVAL;
3683 	return 0;
3684 }
3685 
3686 static void bnx2_release_firmware(struct bnx2 *bp)
3687 {
3688 	if (bp->rv2p_firmware) {
3689 		release_firmware(bp->mips_firmware);
3690 		release_firmware(bp->rv2p_firmware);
3691 		bp->rv2p_firmware = NULL;
3692 	}
3693 }
3694 
3695 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3696 {
3697 	const char *mips_fw_file, *rv2p_fw_file;
3698 	const struct bnx2_mips_fw_file *mips_fw;
3699 	const struct bnx2_rv2p_fw_file *rv2p_fw;
3700 	int rc;
3701 
3702 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3703 		mips_fw_file = FW_MIPS_FILE_09;
3704 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3705 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3706 			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3707 		else
3708 			rv2p_fw_file = FW_RV2P_FILE_09;
3709 	} else {
3710 		mips_fw_file = FW_MIPS_FILE_06;
3711 		rv2p_fw_file = FW_RV2P_FILE_06;
3712 	}
3713 
3714 	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3715 	if (rc) {
3716 		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3717 		goto out;
3718 	}
3719 
3720 	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3721 	if (rc) {
3722 		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3723 		goto err_release_mips_firmware;
3724 	}
3725 	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3726 	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3727 	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3728 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3729 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3730 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3731 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3732 	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3733 		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3734 		rc = -EINVAL;
3735 		goto err_release_firmware;
3736 	}
3737 	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3738 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3739 	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3740 		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3741 		rc = -EINVAL;
3742 		goto err_release_firmware;
3743 	}
3744 out:
3745 	return rc;
3746 
3747 err_release_firmware:
3748 	release_firmware(bp->rv2p_firmware);
3749 	bp->rv2p_firmware = NULL;
3750 err_release_mips_firmware:
3751 	release_firmware(bp->mips_firmware);
3752 	goto out;
3753 }
3754 
3755 static int bnx2_request_firmware(struct bnx2 *bp)
3756 {
3757 	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3758 }
3759 
3760 static u32
3761 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3762 {
3763 	switch (idx) {
3764 	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3765 		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3766 		rv2p_code |= RV2P_BD_PAGE_SIZE;
3767 		break;
3768 	}
3769 	return rv2p_code;
3770 }
3771 
3772 static int
3773 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3774 	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3775 {
3776 	u32 rv2p_code_len, file_offset;
3777 	__be32 *rv2p_code;
3778 	int i;
3779 	u32 val, cmd, addr;
3780 
3781 	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3782 	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3783 
3784 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3785 
3786 	if (rv2p_proc == RV2P_PROC1) {
3787 		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3788 		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3789 	} else {
3790 		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3791 		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3792 	}
3793 
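	/* Each RV2P instruction is 64 bits wide and is loaded as a
	 * high/low pair of dwords; (i / 8) is the instruction index
	 * latched through the processor's address/command register.
	 */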
3794 	for (i = 0; i < rv2p_code_len; i += 8) {
3795 		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3796 		rv2p_code++;
3797 		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3798 		rv2p_code++;
3799 
3800 		val = (i / 8) | cmd;
3801 		BNX2_WR(bp, addr, val);
3802 	}
3803 
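	/* Second pass: apply up to eight fixups.  Each fixup entry holds
	 * the dword index of the low half of an instruction to patch;
	 * the high half sits at loc - 1 and loc / 2 is the instruction
	 * index written back through the address/command register.
	 */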
3804 	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3805 	for (i = 0; i < 8; i++) {
3806 		u32 loc, code;
3807 
3808 		loc = be32_to_cpu(fw_entry->fixup[i]);
3809 		if (loc && ((loc * 4) < rv2p_code_len)) {
3810 			code = be32_to_cpu(*(rv2p_code + loc - 1));
3811 			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3812 			code = be32_to_cpu(*(rv2p_code + loc));
3813 			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3814 			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3815 
3816 			val = (loc / 2) | cmd;
3817 			BNX2_WR(bp, addr, val);
3818 		}
3819 	}
3820 
3821 	/* Reset the processor, un-stall is done later. */
3822 	if (rv2p_proc == RV2P_PROC1) {
3823 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3824 	}
3825 	else {
3826 		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3827 	}
3828 
3829 	return 0;
3830 }
3831 
3832 static int
3833 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3834 	    const struct bnx2_mips_fw_file_entry *fw_entry)
3835 {
3836 	u32 addr, len, file_offset;
3837 	__be32 *data;
3838 	u32 offset;
3839 	u32 val;
3840 
3841 	/* Halt the CPU. */
3842 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3843 	val |= cpu_reg->mode_value_halt;
3844 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3845 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3846 
3847 	/* Load the Text area. */
3848 	addr = be32_to_cpu(fw_entry->text.addr);
3849 	len = be32_to_cpu(fw_entry->text.len);
3850 	file_offset = be32_to_cpu(fw_entry->text.offset);
3851 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3852 
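	/* Firmware sections are linked at MIPS view addresses; translate
	 * each one to a scratchpad offset before writing it out through
	 * the indirect register interface.
	 */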
3853 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3854 	if (len) {
3855 		int j;
3856 
3857 		for (j = 0; j < (len / 4); j++, offset += 4)
3858 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3859 	}
3860 
3861 	/* Load the Data area. */
3862 	addr = be32_to_cpu(fw_entry->data.addr);
3863 	len = be32_to_cpu(fw_entry->data.len);
3864 	file_offset = be32_to_cpu(fw_entry->data.offset);
3865 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3866 
3867 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3868 	if (len) {
3869 		int j;
3870 
3871 		for (j = 0; j < (len / 4); j++, offset += 4)
3872 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3873 	}
3874 
3875 	/* Load the Read-Only area. */
3876 	addr = be32_to_cpu(fw_entry->rodata.addr);
3877 	len = be32_to_cpu(fw_entry->rodata.len);
3878 	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3879 	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3880 
3881 	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3882 	if (len) {
3883 		int j;
3884 
3885 		for (j = 0; j < (len / 4); j++, offset += 4)
3886 			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3887 	}
3888 
3889 	/* Clear the pre-fetch instruction. */
3890 	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3891 
3892 	val = be32_to_cpu(fw_entry->start_addr);
3893 	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3894 
3895 	/* Start the CPU. */
3896 	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3897 	val &= ~cpu_reg->mode_value_halt;
3898 	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3899 	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3900 
3901 	return 0;
3902 }
3903 
3904 static int
3905 bnx2_init_cpus(struct bnx2 *bp)
3906 {
3907 	const struct bnx2_mips_fw_file *mips_fw =
3908 		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3909 	const struct bnx2_rv2p_fw_file *rv2p_fw =
3910 		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3911 	int rc;
3912 
3913 	/* Initialize the RV2P processor. */
3914 	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3915 	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3916 
3917 	/* Initialize the RX Processor. */
3918 	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3919 	if (rc)
3920 		goto init_cpu_err;
3921 
3922 	/* Initialize the TX Processor. */
3923 	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3924 	if (rc)
3925 		goto init_cpu_err;
3926 
3927 	/* Initialize the TX Patch-up Processor. */
3928 	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3929 	if (rc)
3930 		goto init_cpu_err;
3931 
3932 	/* Initialize the Completion Processor. */
3933 	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3934 	if (rc)
3935 		goto init_cpu_err;
3936 
3937 	/* Initialize the Command Processor. */
3938 	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3939 
3940 init_cpu_err:
3941 	return rc;
3942 }
3943 
3944 static void
3945 bnx2_setup_wol(struct bnx2 *bp)
3946 {
3947 	int i;
3948 	u32 val, wol_msg;
3949 
3950 	if (bp->wol) {
3951 		u32 advertising;
3952 		u8 autoneg;
3953 
3954 		autoneg = bp->autoneg;
3955 		advertising = bp->advertising;
3956 
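		/* While waiting for wake-up on copper, advertise only
		 * 10/100: that is enough to receive wake-up frames and
		 * presumably keeps power draw down while suspended.
		 */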
3957 		if (bp->phy_port == PORT_TP) {
3958 			bp->autoneg = AUTONEG_SPEED;
3959 			bp->advertising = ADVERTISED_10baseT_Half |
3960 				ADVERTISED_10baseT_Full |
3961 				ADVERTISED_100baseT_Half |
3962 				ADVERTISED_100baseT_Full |
3963 				ADVERTISED_Autoneg;
3964 		}
3965 
3966 		spin_lock_bh(&bp->phy_lock);
3967 		bnx2_setup_phy(bp, bp->phy_port);
3968 		spin_unlock_bh(&bp->phy_lock);
3969 
3970 		bp->autoneg = autoneg;
3971 		bp->advertising = advertising;
3972 
3973 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3974 
3975 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3976 
3977 		/* Enable port mode. */
3978 		val &= ~BNX2_EMAC_MODE_PORT;
3979 		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3980 		       BNX2_EMAC_MODE_ACPI_RCVD |
3981 		       BNX2_EMAC_MODE_MPKT;
3982 		if (bp->phy_port == PORT_TP) {
3983 			val |= BNX2_EMAC_MODE_PORT_MII;
3984 		} else {
3985 			val |= BNX2_EMAC_MODE_PORT_GMII;
3986 			if (bp->line_speed == SPEED_2500)
3987 				val |= BNX2_EMAC_MODE_25G_MODE;
3988 		}
3989 
3990 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3991 
3992 		/* receive all multicast */
3993 		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3994 			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3995 				0xffffffff);
3996 		}
3997 		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3998 
3999 		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
4000 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
4001 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
4002 		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
4003 
4004 		/* Need to enable EMAC and RPM for WOL. */
4005 		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4006 			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4007 			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4008 			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4009 
4010 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4011 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4012 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4013 
4014 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4015 	} else {
4016 		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4017 	}
4018 
4019 	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4020 		u32 val;
4021 
4022 		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4023 		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4024 			bnx2_fw_sync(bp, wol_msg, 1, 0);
4025 			return;
4026 		}
4027 		/* Tell firmware not to power down the PHY yet, otherwise
4028 		 * the chip will take a long time to respond to MMIO reads.
4029 		 */
4030 		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4031 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4032 			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
4033 		bnx2_fw_sync(bp, wol_msg, 1, 0);
4034 		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4035 	}
4036 
4037 }
4038 
4039 static int
4040 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4041 {
4042 	switch (state) {
4043 	case PCI_D0: {
4044 		u32 val;
4045 
4046 		pci_enable_wake(bp->pdev, PCI_D0, false);
4047 		pci_set_power_state(bp->pdev, PCI_D0);
4048 
4049 		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4050 		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4051 		val &= ~BNX2_EMAC_MODE_MPKT;
4052 		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4053 
4054 		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4055 		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4056 		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4057 		break;
4058 	}
4059 	case PCI_D3hot: {
4060 		bnx2_setup_wol(bp);
4061 		pci_wake_from_d3(bp->pdev, bp->wol);
4062 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4063 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4064 
4065 			if (bp->wol)
4066 				pci_set_power_state(bp->pdev, PCI_D3hot);
4067 			break;
4068 
4069 		}
4070 		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4071 			u32 val;
4072 
4073 			/* Tell firmware not to power down the PHY yet,
4074 			 * otherwise the other port may not respond to
4075 			 * MMIO reads.
4076 			 */
4077 			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4078 			val &= ~BNX2_CONDITION_PM_STATE_MASK;
4079 			val |= BNX2_CONDITION_PM_STATE_UNPREP;
4080 			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4081 		}
4082 		pci_set_power_state(bp->pdev, PCI_D3hot);
4083 
4084 		/* No more memory access after this point until
4085 		 * device is brought back to D0.
4086 		 */
4087 		break;
4088 	}
4089 	default:
4090 		return -EINVAL;
4091 	}
4092 	return 0;
4093 }
4094 
4095 static int
4096 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4097 {
4098 	u32 val;
4099 	int j;
4100 
4101 	/* Request access to the flash interface. */
4102 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4103 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4104 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4105 		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4106 			break;
4107 
4108 		udelay(5);
4109 	}
4110 
4111 	if (j >= NVRAM_TIMEOUT_COUNT)
4112 		return -EBUSY;
4113 
4114 	return 0;
4115 }
4116 
4117 static int
4118 bnx2_release_nvram_lock(struct bnx2 *bp)
4119 {
4120 	int j;
4121 	u32 val;
4122 
4123 	/* Relinquish nvram interface. */
4124 	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4125 
4126 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4127 		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4128 		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4129 			break;
4130 
4131 		udelay(5);
4132 	}
4133 
4134 	if (j >= NVRAM_TIMEOUT_COUNT)
4135 		return -EBUSY;
4136 
4137 	return 0;
4138 }
4139 
4140 
4141 static int
4142 bnx2_enable_nvram_write(struct bnx2 *bp)
4143 {
4144 	u32 val;
4145 
4146 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4147 	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4148 
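	/* Parts flagged BNX2_NV_WREN additionally require an explicit
	 * write-enable command before programming, as is typical for
	 * SPI serial flash.
	 */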
4149 	if (bp->flash_info->flags & BNX2_NV_WREN) {
4150 		int j;
4151 
4152 		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4153 		BNX2_WR(bp, BNX2_NVM_COMMAND,
4154 			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4155 
4156 		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4157 			udelay(5);
4158 
4159 			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4160 			if (val & BNX2_NVM_COMMAND_DONE)
4161 				break;
4162 		}
4163 
4164 		if (j >= NVRAM_TIMEOUT_COUNT)
4165 			return -EBUSY;
4166 	}
4167 	return 0;
4168 }
4169 
4170 static void
4171 bnx2_disable_nvram_write(struct bnx2 *bp)
4172 {
4173 	u32 val;
4174 
4175 	val = BNX2_RD(bp, BNX2_MISC_CFG);
4176 	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4177 }
4178 
4179 
4180 static void
4181 bnx2_enable_nvram_access(struct bnx2 *bp)
4182 {
4183 	u32 val;
4184 
4185 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4186 	/* Enable both bits, even on read. */
4187 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4188 		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4189 }
4190 
4191 static void
4192 bnx2_disable_nvram_access(struct bnx2 *bp)
4193 {
4194 	u32 val;
4195 
4196 	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4197 	/* Disable both bits, even after read. */
4198 	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4199 		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4200 			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4201 }
4202 
4203 static int
4204 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4205 {
4206 	u32 cmd;
4207 	int j;
4208 
4209 	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4210 		/* Buffered flash, no erase needed */
4211 		return 0;
4212 
4213 	/* Build an erase command */
4214 	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4215 	      BNX2_NVM_COMMAND_DOIT;
4216 
4217 	/* Need to clear DONE bit separately. */
4218 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4219 
4220 	/* Address of the NVRAM page to erase. */
4221 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4222 
4223 	/* Issue an erase command. */
4224 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4225 
4226 	/* Wait for completion. */
4227 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4228 		u32 val;
4229 
4230 		udelay(5);
4231 
4232 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4233 		if (val & BNX2_NVM_COMMAND_DONE)
4234 			break;
4235 	}
4236 
4237 	if (j >= NVRAM_TIMEOUT_COUNT)
4238 		return -EBUSY;
4239 
4240 	return 0;
4241 }
4242 
4243 static int
4244 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4245 {
4246 	u32 cmd;
4247 	int j;
4248 
4249 	/* Build the command word. */
4250 	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4251 
4252 	/* Calculate the physical offset within a buffered flash; not needed for the 5709. */
4253 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4254 		offset = ((offset / bp->flash_info->page_size) <<
4255 			   bp->flash_info->page_bits) +
4256 			  (offset % bp->flash_info->page_size);
4257 	}
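	/* For example, a part with 264-byte pages strapped on 512-byte
	 * boundaries (page_bits == 9) maps byte offset 1000 to
	 * (3 << 9) + 208 = 1744.
	 */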
4258 
4259 	/* Need to clear DONE bit separately. */
4260 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4261 
4262 	/* Address of the NVRAM to read from. */
4263 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4264 
4265 	/* Issue a read command. */
4266 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4267 
4268 	/* Wait for completion. */
4269 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4270 		u32 val;
4271 
4272 		udelay(5);
4273 
4274 		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4275 		if (val & BNX2_NVM_COMMAND_DONE) {
4276 			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4277 			memcpy(ret_val, &v, 4);
4278 			break;
4279 		}
4280 	}
4281 	if (j >= NVRAM_TIMEOUT_COUNT)
4282 		return -EBUSY;
4283 
4284 	return 0;
4285 }
4286 
4287 
4288 static int
4289 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4290 {
4291 	u32 cmd;
4292 	__be32 val32;
4293 	int j;
4294 
4295 	/* Build the command word. */
4296 	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4297 
4298 	/* Calculate the physical offset within a buffered flash; not needed for the 5709. */
4299 	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4300 		offset = ((offset / bp->flash_info->page_size) <<
4301 			  bp->flash_info->page_bits) +
4302 			 (offset % bp->flash_info->page_size);
4303 	}
4304 
4305 	/* Need to clear DONE bit separately. */
4306 	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4307 
4308 	memcpy(&val32, val, 4);
4309 
4310 	/* Write the data. */
4311 	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4312 
4313 	/* Address of the NVRAM to write to. */
4314 	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4315 
4316 	/* Issue the write command. */
4317 	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4318 
4319 	/* Wait for completion. */
4320 	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4321 		udelay(5);
4322 
4323 		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4324 			break;
4325 	}
4326 	if (j >= NVRAM_TIMEOUT_COUNT)
4327 		return -EBUSY;
4328 
4329 	return 0;
4330 }
4331 
4332 static int
4333 bnx2_init_nvram(struct bnx2 *bp)
4334 {
4335 	u32 val;
4336 	int j, entry_count, rc = 0;
4337 	const struct flash_spec *flash;
4338 
4339 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4340 		bp->flash_info = &flash_5709;
4341 		goto get_flash_size;
4342 	}
4343 
4344 	/* Determine the selected interface. */
4345 	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4346 
4347 	entry_count = ARRAY_SIZE(flash_table);
4348 
4349 	if (val & 0x40000000) {
4350 
4351 		/* Flash interface has been reconfigured */
4352 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4353 		     j++, flash++) {
4354 			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4355 			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4356 				bp->flash_info = flash;
4357 				break;
4358 			}
4359 		}
4360 	}
4361 	else {
4362 		u32 mask;
4363 		/* Not yet reconfigured */
4364 
4365 		if (val & (1 << 23))
4366 			mask = FLASH_BACKUP_STRAP_MASK;
4367 		else
4368 			mask = FLASH_STRAP_MASK;
4369 
4370 		for (j = 0, flash = &flash_table[0]; j < entry_count;
4371 			j++, flash++) {
4372 
4373 			if ((val & mask) == (flash->strapping & mask)) {
4374 				bp->flash_info = flash;
4375 
4376 				/* Request access to the flash interface. */
4377 				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4378 					return rc;
4379 
4380 				/* Enable access to flash interface */
4381 				bnx2_enable_nvram_access(bp);
4382 
4383 				/* Reconfigure the flash interface */
4384 				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4385 				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4386 				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4387 				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4388 
4389 				/* Disable access to flash interface */
4390 				bnx2_disable_nvram_access(bp);
4391 				bnx2_release_nvram_lock(bp);
4392 
4393 				break;
4394 			}
4395 		}
4396 	} /* if (val & 0x40000000) */
4397 
4398 	if (j == entry_count) {
4399 		bp->flash_info = NULL;
4400 		pr_alert("Unknown flash/EEPROM type\n");
4401 		return -ENODEV;
4402 	}
4403 
4404 get_flash_size:
4405 	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4406 	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4407 	if (val)
4408 		bp->flash_size = val;
4409 	else
4410 		bp->flash_size = bp->flash_info->total_size;
4411 
4412 	return rc;
4413 }
4414 
4415 static int
4416 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4417 		int buf_size)
4418 {
4419 	int rc = 0;
4420 	u32 cmd_flags, offset32, len32, extra;
4421 
4422 	if (buf_size == 0)
4423 		return 0;
4424 
4425 	/* Request access to the flash interface. */
4426 	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4427 		return rc;
4428 
4429 	/* Enable access to flash interface */
4430 	bnx2_enable_nvram_access(bp);
4431 
4432 	len32 = buf_size;
4433 	offset32 = offset;
4434 	extra = 0;
4435 
4436 	cmd_flags = 0;
4437 
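	/* NVRAM is read one dword at a time: peel off any unaligned
	 * head first, pad the tail through 'extra', then stream whole
	 * dwords with FIRST/LAST command flags framing the burst.
	 */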
4438 	if (offset32 & 3) {
4439 		u8 buf[4];
4440 		u32 pre_len;
4441 
4442 		offset32 &= ~3;
4443 		pre_len = 4 - (offset & 3);
4444 
4445 		if (pre_len >= len32) {
4446 			pre_len = len32;
4447 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4448 				    BNX2_NVM_COMMAND_LAST;
4449 		}
4450 		else {
4451 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4452 		}
4453 
4454 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4455 
4456 		if (rc)
4457 			return rc;
4458 
4459 		memcpy(ret_buf, buf + (offset & 3), pre_len);
4460 
4461 		offset32 += 4;
4462 		ret_buf += pre_len;
4463 		len32 -= pre_len;
4464 	}
4465 	if (len32 & 3) {
4466 		extra = 4 - (len32 & 3);
4467 		len32 = (len32 + 4) & ~3;
4468 	}
4469 
4470 	if (len32 == 4) {
4471 		u8 buf[4];
4472 
4473 		if (cmd_flags)
4474 			cmd_flags = BNX2_NVM_COMMAND_LAST;
4475 		else
4476 			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4477 				    BNX2_NVM_COMMAND_LAST;
4478 
4479 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4480 
4481 		memcpy(ret_buf, buf, 4 - extra);
4482 	}
4483 	else if (len32 > 0) {
4484 		u8 buf[4];
4485 
4486 		/* Read the first word. */
4487 		if (cmd_flags)
4488 			cmd_flags = 0;
4489 		else
4490 			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4491 
4492 		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4493 
4494 		/* Advance to the next dword. */
4495 		offset32 += 4;
4496 		ret_buf += 4;
4497 		len32 -= 4;
4498 
4499 		while (len32 > 4 && rc == 0) {
4500 			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4501 
4502 			/* Advance to the next dword. */
4503 			offset32 += 4;
4504 			ret_buf += 4;
4505 			len32 -= 4;
4506 		}
4507 
4508 		if (rc)
4509 			return rc;
4510 
4511 		cmd_flags = BNX2_NVM_COMMAND_LAST;
4512 		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4513 
4514 		memcpy(ret_buf, buf, 4 - extra);
4515 	}
4516 
4517 	/* Disable access to flash interface */
4518 	bnx2_disable_nvram_access(bp);
4519 
4520 	bnx2_release_nvram_lock(bp);
4521 
4522 	return rc;
4523 }
4524 
4525 static int
4526 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4527 		int buf_size)
4528 {
4529 	u32 written, offset32, len32;
4530 	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4531 	int rc = 0;
4532 	int align_start, align_end;
4533 
4534 	buf = data_buf;
4535 	offset32 = offset;
4536 	len32 = buf_size;
4537 	align_start = align_end = 0;
4538 
4539 	if ((align_start = (offset32 & 3))) {
4540 		offset32 &= ~3;
4541 		len32 += align_start;
4542 		if (len32 < 4)
4543 			len32 = 4;
4544 		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4545 			return rc;
4546 	}
4547 
4548 	if (len32 & 3) {
4549 		align_end = 4 - (len32 & 3);
4550 		len32 += align_end;
4551 		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4552 			return rc;
4553 	}
4554 
4555 	if (align_start || align_end) {
4556 		align_buf = kmalloc(len32, GFP_KERNEL);
4557 		if (!align_buf)
4558 			return -ENOMEM;
4559 		if (align_start) {
4560 			memcpy(align_buf, start, 4);
4561 		}
4562 		if (align_end) {
4563 			memcpy(align_buf + len32 - 4, end, 4);
4564 		}
4565 		memcpy(align_buf + align_start, data_buf, buf_size);
4566 		buf = align_buf;
4567 	}
4568 
4569 	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4570 		flash_buffer = kmalloc(264, GFP_KERNEL);
4571 		if (!flash_buffer) {
4572 			rc = -ENOMEM;
4573 			goto nvram_write_end;
4574 		}
4575 	}
4576 
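	/* Non-buffered flash must be reprogrammed a page at a time:
	 * read the page into flash_buffer (264 bytes, matching the
	 * largest supported page size), erase it, then write back the
	 * bytes before the new data, the new data itself, and the bytes
	 * after it, in that order.
	 */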
4577 	written = 0;
4578 	while ((written < len32) && (rc == 0)) {
4579 		u32 page_start, page_end, data_start, data_end;
4580 		u32 addr, cmd_flags;
4581 		int i;
4582 
4583 		/* Find the page_start addr */
4584 		page_start = offset32 + written;
4585 		page_start -= (page_start % bp->flash_info->page_size);
4586 		/* Find the page_end addr */
4587 		page_end = page_start + bp->flash_info->page_size;
4588 		/* Find the data_start addr */
4589 		data_start = (written == 0) ? offset32 : page_start;
4590 		/* Find the data_end addr */
4591 		data_end = (page_end > offset32 + len32) ?
4592 			(offset32 + len32) : page_end;
4593 
4594 		/* Request access to the flash interface. */
4595 		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4596 			goto nvram_write_end;
4597 
4598 		/* Enable access to flash interface */
4599 		bnx2_enable_nvram_access(bp);
4600 
4601 		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4602 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4603 			int j;
4604 
4605 			/* Read the whole page into the buffer
4606 			 * (non-buffered flash only) */
4607 			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4608 				if (j == (bp->flash_info->page_size - 4)) {
4609 					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4610 				}
4611 				rc = bnx2_nvram_read_dword(bp,
4612 					page_start + j,
4613 					&flash_buffer[j],
4614 					cmd_flags);
4615 
4616 				if (rc)
4617 					goto nvram_write_end;
4618 
4619 				cmd_flags = 0;
4620 			}
4621 		}
4622 
4623 		/* Enable writes to flash interface (unlock write-protect) */
4624 		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4625 			goto nvram_write_end;
4626 
4627 		/* Loop to write back the buffer data from page_start to
4628 		 * data_start */
4629 		i = 0;
4630 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4631 			/* Erase the page */
4632 			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4633 				goto nvram_write_end;
4634 
4635 			/* Re-enable the write again for the actual write */
4636 			bnx2_enable_nvram_write(bp);
4637 
4638 			for (addr = page_start; addr < data_start;
4639 				addr += 4, i += 4) {
4640 
4641 				rc = bnx2_nvram_write_dword(bp, addr,
4642 					&flash_buffer[i], cmd_flags);
4643 
4644 				if (rc != 0)
4645 					goto nvram_write_end;
4646 
4647 				cmd_flags = 0;
4648 			}
4649 		}
4650 
4651 		/* Loop to write the new data from data_start to data_end */
4652 		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4653 			if ((addr == page_end - 4) ||
4654 				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4655 				 (addr == data_end - 4))) {
4656 
4657 				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4658 			}
4659 			rc = bnx2_nvram_write_dword(bp, addr, buf,
4660 				cmd_flags);
4661 
4662 			if (rc != 0)
4663 				goto nvram_write_end;
4664 
4665 			cmd_flags = 0;
4666 			buf += 4;
4667 		}
4668 
4669 		/* Loop to write back the buffer data from data_end
4670 		 * to page_end */
4671 		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4672 			for (addr = data_end; addr < page_end;
4673 				addr += 4, i += 4) {
4674 
4675 				if (addr == page_end-4) {
4676 					cmd_flags = BNX2_NVM_COMMAND_LAST;
4677 				}
4678 				rc = bnx2_nvram_write_dword(bp, addr,
4679 					&flash_buffer[i], cmd_flags);
4680 
4681 				if (rc != 0)
4682 					goto nvram_write_end;
4683 
4684 				cmd_flags = 0;
4685 			}
4686 		}
4687 
4688 		/* Disable writes to flash interface (lock write-protect) */
4689 		bnx2_disable_nvram_write(bp);
4690 
4691 		/* Disable access to flash interface */
4692 		bnx2_disable_nvram_access(bp);
4693 		bnx2_release_nvram_lock(bp);
4694 
4695 		/* Increment written */
4696 		written += data_end - data_start;
4697 	}
4698 
4699 nvram_write_end:
4700 	kfree(flash_buffer);
4701 	kfree(align_buf);
4702 	return rc;
4703 }
4704 
4705 static void
4706 bnx2_init_fw_cap(struct bnx2 *bp)
4707 {
4708 	u32 val, sig = 0;
4709 
4710 	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4711 	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4712 
4713 	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4714 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4715 
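	/* Capability handshake: the bootcode publishes what it supports
	 * in the FW_CAP mailbox behind a signature, and the driver
	 * echoes the subset it will use back through the ACK mailbox.
	 */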
4716 	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4717 	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4718 		return;
4719 
4720 	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4721 		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4722 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4723 	}
4724 
4725 	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4726 	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4727 		u32 link;
4728 
4729 		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4730 
4731 		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4732 		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4733 			bp->phy_port = PORT_FIBRE;
4734 		else
4735 			bp->phy_port = PORT_TP;
4736 
4737 		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4738 		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4739 	}
4740 
4741 	if (netif_running(bp->dev) && sig)
4742 		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4743 }
4744 
4745 static void
4746 bnx2_setup_msix_tbl(struct bnx2 *bp)
4747 {
4748 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4749 
4750 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4751 	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4752 }
4753 
4754 static void
4755 bnx2_wait_dma_complete(struct bnx2 *bp)
4756 {
4757 	u32 val;
4758 	int i;
4759 
4760 	/*
4761 	 * Wait for the current PCI transaction to complete before
4762 	 * issuing a reset.
4763 	 */
4764 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4765 	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4766 		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4767 			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4768 			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4769 			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4770 			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4771 		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4772 		udelay(5);
4773 	} else {  /* 5709 */
4774 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4775 		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4776 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4777 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4778 
4779 		for (i = 0; i < 100; i++) {
4780 			msleep(1);
4781 			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4782 			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4783 				break;
4784 		}
4785 	}
4786 
4787 	return;
4788 }
4789 
4790 
4791 static int
4792 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4793 {
4794 	u32 val;
4795 	int i, rc = 0;
4796 	u8 old_port;
4797 
4798 	/* Wait for the current PCI transaction to complete before
4799 	 * issuing a reset. */
4800 	bnx2_wait_dma_complete(bp);
4801 
4802 	/* Wait for the firmware to tell us it is ok to issue a reset. */
4803 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4804 
4805 	/* Deposit a driver reset signature so the firmware knows that
4806 	 * this is a soft reset. */
4807 	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4808 		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4809 
4810 	/* Do a dummy read to force the chip to complete all current transactions
4811 	 * before we issue a reset. */
4812 	val = BNX2_RD(bp, BNX2_MISC_ID);
4813 
4814 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4815 		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4816 		BNX2_RD(bp, BNX2_MISC_COMMAND);
4817 		udelay(5);
4818 
4819 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4820 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4821 
4822 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4823 
4824 	} else {
4825 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4826 		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4827 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4828 
4829 		/* Chip reset. */
4830 		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4831 
4832 		/* Reading back any register after chip reset will hang the
4833 		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4834 		 * of margin for write posting.
4835 		 */
4836 		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4837 		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4838 			msleep(20);
4839 
4840 		/* Reset takes approximately 30 usec */
4841 		for (i = 0; i < 10; i++) {
4842 			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4843 			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4844 				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4845 				break;
4846 			udelay(10);
4847 		}
4848 
4849 		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4850 			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4851 			pr_err("Chip reset did not complete\n");
4852 			return -EBUSY;
4853 		}
4854 	}
4855 
4856 	/* Make sure byte swapping is properly configured. */
4857 	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4858 	if (val != 0x01020304) {
4859 		pr_err("Chip not in correct endian mode\n");
4860 		return -ENODEV;
4861 	}
4862 
4863 	/* Wait for the firmware to finish its initialization. */
4864 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4865 	if (rc)
4866 		return rc;
4867 
4868 	spin_lock_bh(&bp->phy_lock);
4869 	old_port = bp->phy_port;
4870 	bnx2_init_fw_cap(bp);
4871 	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4872 	    old_port != bp->phy_port)
4873 		bnx2_set_default_remote_link(bp);
4874 	spin_unlock_bh(&bp->phy_lock);
4875 
4876 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4877 		/* Adjust the voltage regulator two steps lower.  The default
4878 		 * of this register is 0x0000000e. */
4879 		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4880 
4881 		/* Remove bad rbuf memory from the free pool. */
4882 		rc = bnx2_alloc_bad_rbuf(bp);
4883 	}
4884 
4885 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4886 		bnx2_setup_msix_tbl(bp);
4887 		/* Prevent MSIX table reads and writes from timing out */
4888 		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4889 			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4890 	}
4891 
4892 	return rc;
4893 }
4894 
4895 static int
4896 bnx2_init_chip(struct bnx2 *bp)
4897 {
4898 	u32 val, mtu;
4899 	int rc, i;
4900 
4901 	/* Make sure the interrupt is not active. */
4902 	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4903 
4904 	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4905 	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4906 #ifdef __BIG_ENDIAN
4907 	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4908 #endif
4909 	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4910 	      DMA_READ_CHANS << 12 |
4911 	      DMA_WRITE_CHANS << 16;
4912 
4913 	val |= (0x2 << 20) | (1 << 11);
4914 
4915 	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4916 		val |= (1 << 23);
4917 
4918 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4919 	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4920 	    !(bp->flags & BNX2_FLAG_PCIX))
4921 		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4922 
4923 	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4924 
4925 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4926 		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4927 		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4928 		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4929 	}
4930 
4931 	if (bp->flags & BNX2_FLAG_PCIX) {
4932 		u16 val16;
4933 
4934 		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4935 				     &val16);
4936 		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4937 				      val16 & ~PCI_X_CMD_ERO);
4938 	}
4939 
4940 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4941 		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4942 		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4943 		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4944 
4945 	/* Initialize context mapping and zero out the quick contexts.  The
4946 	 * context block must have already been enabled. */
4947 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4948 		rc = bnx2_init_5709_context(bp);
4949 		if (rc)
4950 			return rc;
4951 	} else
4952 		bnx2_init_context(bp);
4953 
4954 	if ((rc = bnx2_init_cpus(bp)) != 0)
4955 		return rc;
4956 
4957 	bnx2_init_nvram(bp);
4958 
4959 	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4960 
4961 	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4962 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4963 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4964 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4965 		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4966 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4967 			val |= BNX2_MQ_CONFIG_HALT_DIS;
4968 	}
4969 
4970 	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4971 
4972 	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4973 	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4974 	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4975 
4976 	val = (BNX2_PAGE_BITS - 8) << 24;
4977 	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4978 
4979 	/* Configure page size. */
4980 	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4981 	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4982 	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4983 	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4984 
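	/* Seed the EMAC transmit backoff generator from the MAC address,
	 * presumably so that NICs sharing a segment tend to choose
	 * different backoff slots.
	 */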
4985 	val = bp->mac_addr[0] +
4986 	      (bp->mac_addr[1] << 8) +
4987 	      (bp->mac_addr[2] << 16) +
4988 	      bp->mac_addr[3] +
4989 	      (bp->mac_addr[4] << 8) +
4990 	      (bp->mac_addr[5] << 16);
4991 	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4992 
4993 	/* Program the MTU.  Also include 4 bytes for CRC32. */
4994 	mtu = bp->dev->mtu;
4995 	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4996 	if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
4997 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4998 	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4999 
5000 	if (mtu < ETH_DATA_LEN)
5001 		mtu = ETH_DATA_LEN;
5002 
5003 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
5004 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
5005 	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
5006 
5007 	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
5008 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5009 		bp->bnx2_napi[i].last_status_idx = 0;
5010 
5011 	bp->idle_chk_status_idx = 0xffff;
5012 
5013 	/* Set up how to generate a link change interrupt. */
5014 	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
5015 
5016 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5017 		(u64) bp->status_blk_mapping & 0xffffffff);
5018 	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5019 
5020 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5021 		(u64) bp->stats_blk_mapping & 0xffffffff);
5022 	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5023 		(u64) bp->stats_blk_mapping >> 32);
5024 
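	/* Each coalescing register packs two values: the low 16 bits
	 * apply normally and the high 16 bits (the *_int values,
	 * ethtool's *_irq parameters) apply while an interrupt is
	 * being serviced.
	 */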
5025 	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5026 		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5027 
5028 	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5029 		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5030 
5031 	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5032 		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5033 
5034 	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5035 
5036 	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5037 
5038 	BNX2_WR(bp, BNX2_HC_COM_TICKS,
5039 		(bp->com_ticks_int << 16) | bp->com_ticks);
5040 
5041 	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5042 		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5043 
5044 	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5045 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5046 	else
5047 		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5048 	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5049 
5050 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5051 		val = BNX2_HC_CONFIG_COLLECT_STATS;
5052 	else {
5053 		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5054 		      BNX2_HC_CONFIG_COLLECT_STATS;
5055 	}
5056 
5057 	if (bp->flags & BNX2_FLAG_USING_MSIX) {
5058 		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5059 			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5060 
5061 		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5062 	}
5063 
5064 	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5065 		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5066 
5067 	BNX2_WR(bp, BNX2_HC_CONFIG, val);
5068 
5069 	if (bp->rx_ticks < 25)
5070 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5071 	else
5072 		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5073 
5074 	for (i = 1; i < bp->irq_nvecs; i++) {
5075 		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5076 			   BNX2_HC_SB_CONFIG_1;
5077 
5078 		BNX2_WR(bp, base,
5079 			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5080 			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5081 			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5082 
5083 		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5084 			(bp->tx_quick_cons_trip_int << 16) |
5085 			 bp->tx_quick_cons_trip);
5086 
5087 		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5088 			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5089 
5090 		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5091 			(bp->rx_quick_cons_trip_int << 16) |
5092 			bp->rx_quick_cons_trip);
5093 
5094 		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5095 			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5096 	}
5097 
5098 	/* Clear internal stats counters. */
5099 	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5100 
5101 	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5102 
5103 	/* Initialize the receive filter. */
5104 	bnx2_set_rx_mode(bp->dev);
5105 
5106 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5107 		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5108 		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5109 		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5110 	}
5111 	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5112 			  1, 0);
5113 
5114 	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5115 	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5116 
5117 	udelay(20);
5118 
5119 	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5120 
5121 	return rc;
5122 }
5123 
5124 static void
5125 bnx2_clear_ring_states(struct bnx2 *bp)
5126 {
5127 	struct bnx2_napi *bnapi;
5128 	struct bnx2_tx_ring_info *txr;
5129 	struct bnx2_rx_ring_info *rxr;
5130 	int i;
5131 
5132 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5133 		bnapi = &bp->bnx2_napi[i];
5134 		txr = &bnapi->tx_ring;
5135 		rxr = &bnapi->rx_ring;
5136 
5137 		txr->tx_cons = 0;
5138 		txr->hw_tx_cons = 0;
5139 		rxr->rx_prod_bseq = 0;
5140 		rxr->rx_prod = 0;
5141 		rxr->rx_cons = 0;
5142 		rxr->rx_pg_prod = 0;
5143 		rxr->rx_pg_cons = 0;
5144 	}
5145 }
5146 
5147 static void
5148 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5149 {
5150 	u32 val, offset0, offset1, offset2, offset3;
5151 	u32 cid_addr = GET_CID_ADDR(cid);
5152 
5153 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5154 		offset0 = BNX2_L2CTX_TYPE_XI;
5155 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5156 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5157 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5158 	} else {
5159 		offset0 = BNX2_L2CTX_TYPE;
5160 		offset1 = BNX2_L2CTX_CMD_TYPE;
5161 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5162 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5163 	}
5164 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5165 	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5166 
5167 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5168 	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5169 
5170 	val = (u64) txr->tx_desc_mapping >> 32;
5171 	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5172 
5173 	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5174 	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5175 }
5176 
5177 static void
5178 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5179 {
5180 	struct bnx2_tx_bd *txbd;
5181 	u32 cid = TX_CID;
5182 	struct bnx2_napi *bnapi;
5183 	struct bnx2_tx_ring_info *txr;
5184 
5185 	bnapi = &bp->bnx2_napi[ring_num];
5186 	txr = &bnapi->tx_ring;
5187 
5188 	if (ring_num == 0)
5189 		cid = TX_CID;
5190 	else
5191 		cid = TX_TSS_CID + ring_num - 1;
5192 
5193 	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5194 
5195 	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5196 
5197 	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5198 	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5199 
5200 	txr->tx_prod = 0;
5201 	txr->tx_prod_bseq = 0;
5202 
5203 	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5204 	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5205 
5206 	bnx2_init_tx_context(bp, cid, txr);
5207 }
5208 
5209 static void
5210 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5211 		     u32 buf_size, int num_rings)
5212 {
5213 	int i;
5214 	struct bnx2_rx_bd *rxbd;
5215 
5216 	for (i = 0; i < num_rings; i++) {
5217 		int j;
5218 
5219 		rxbd = &rx_ring[i][0];
5220 		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5221 			rxbd->rx_bd_len = buf_size;
5222 			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5223 		}
5224 		if (i == (num_rings - 1))
5225 			j = 0;
5226 		else
5227 			j = i + 1;
5228 		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5229 		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5230 	}
5231 }
5232 
5233 static void
5234 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5235 {
5236 	int i;
5237 	u16 prod, ring_prod;
5238 	u32 cid, rx_cid_addr, val;
5239 	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5240 	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5241 
5242 	if (ring_num == 0)
5243 		cid = RX_CID;
5244 	else
5245 		cid = RX_RSS_CID + ring_num - 1;
5246 
5247 	rx_cid_addr = GET_CID_ADDR(cid);
5248 
5249 	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5250 			     bp->rx_buf_use_size, bp->rx_max_ring);
5251 
5252 	bnx2_init_rx_context(bp, cid);
5253 
5254 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5255 		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5256 		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5257 	}
5258 
5259 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5260 	if (bp->rx_pg_ring_size) {
5261 		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5262 				     rxr->rx_pg_desc_mapping,
5263 				     PAGE_SIZE, bp->rx_max_pg_ring);
5264 		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5265 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5266 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5267 		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5268 
5269 		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5270 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5271 
5272 		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5273 		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5274 
5275 		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5276 			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5277 	}
5278 
5279 	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5280 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5281 
5282 	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5283 	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5284 
5285 	ring_prod = prod = rxr->rx_pg_prod;
5286 	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5287 		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5288 			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5289 				    ring_num, i, bp->rx_pg_ring_size);
5290 			break;
5291 		}
5292 		prod = BNX2_NEXT_RX_BD(prod);
5293 		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5294 	}
5295 	rxr->rx_pg_prod = prod;
5296 
5297 	ring_prod = prod = rxr->rx_prod;
5298 	for (i = 0; i < bp->rx_ring_size; i++) {
5299 		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5300 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5301 				    ring_num, i, bp->rx_ring_size);
5302 			break;
5303 		}
5304 		prod = BNX2_NEXT_RX_BD(prod);
5305 		ring_prod = BNX2_RX_RING_IDX(prod);
5306 	}
5307 	rxr->rx_prod = prod;
5308 
5309 	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5310 	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5311 	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5312 
5313 	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5314 	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5315 
5316 	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5317 }
5318 
5319 static void
5320 bnx2_init_all_rings(struct bnx2 *bp)
5321 {
5322 	int i;
5323 	u32 val;
5324 
5325 	bnx2_clear_ring_states(bp);
5326 
5327 	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5328 	for (i = 0; i < bp->num_tx_rings; i++)
5329 		bnx2_init_tx_ring(bp, i);
5330 
5331 	if (bp->num_tx_rings > 1)
5332 		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5333 			(TX_TSS_CID << 7));
5334 
5335 	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5336 	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5337 
5338 	for (i = 0; i < bp->num_rx_rings; i++)
5339 		bnx2_init_rx_ring(bp, i);
5340 
5341 	if (bp->num_rx_rings > 1) {
5342 		u32 tbl_32 = 0;
5343 
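		/* The RSS indirection table holds 4-bit ring indices packed
		 * eight per dword.  Entries cycle round-robin over the
		 * num_rx_rings - 1 RSS rings (ring 0 remains the default)
		 * and are committed a dword at a time through the RLUP
		 * command register.
		 */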
5344 		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5345 			int shift = (i % 8) << 2;
5346 
5347 			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5348 			if ((i % 8) == 7) {
5349 				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5350 				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5351 					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5352 					BNX2_RLUP_RSS_COMMAND_WRITE |
5353 					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5354 				tbl_32 = 0;
5355 			}
5356 		}
5357 
5358 		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5359 		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5360 
5361 		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5362 
5363 	}
5364 }
5365 
5366 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5367 {
5368 	u32 max, num_rings = 1;
5369 
5370 	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5371 		ring_size -= BNX2_MAX_RX_DESC_CNT;
5372 		num_rings++;
5373 	}
5374 	/* round to next power of 2 */
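	/* e.g. with 4K pages (255 usable descriptors per ring page), a
	 * 1500-entry ring needs six pages, which rounds up to eight.
	 */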
5375 	max = max_size;
5376 	while ((max & num_rings) == 0)
5377 		max >>= 1;
5378 
5379 	if (num_rings != max)
5380 		max <<= 1;
5381 
5382 	return max;
5383 }
5384 
5385 static void
5386 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5387 {
5388 	u32 rx_size, rx_space, jumbo_size;
5389 
5390 	/* 8 for CRC and VLAN */
5391 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5392 
5393 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5394 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5395 
5396 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5397 	bp->rx_pg_ring_size = 0;
5398 	bp->rx_max_pg_ring = 0;
5399 	bp->rx_max_pg_ring_idx = 0;
5400 	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
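		/* Estimate how many page-ring buffers each jumbo frame
		 * needs; the 40 bytes subtracted approximate the TCP/IP
		 * headers that remain in the linear header buffer.
		 */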
5401 		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5402 
5403 		jumbo_size = size * pages;
5404 		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5405 			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5406 
5407 		bp->rx_pg_ring_size = jumbo_size;
5408 		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5409 							BNX2_MAX_RX_PG_RINGS);
5410 		bp->rx_max_pg_ring_idx =
5411 			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5412 		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5413 		bp->rx_copy_thresh = 0;
5414 	}
5415 
5416 	bp->rx_buf_use_size = rx_size;
5417 	/* hw alignment + build_skb() overhead */
5418 	bp->rx_buf_size = kmalloc_size_roundup(
5419 		SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5420 		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
5421 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5422 	bp->rx_ring_size = size;
5423 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5424 	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5425 }
5426 
5427 static void
5428 bnx2_free_tx_skbs(struct bnx2 *bp)
5429 {
5430 	int i;
5431 
5432 	for (i = 0; i < bp->num_tx_rings; i++) {
5433 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5434 		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5435 		int j;
5436 
5437 		if (!txr->tx_buf_ring)
5438 			continue;
5439 
5440 		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5441 			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5442 			struct sk_buff *skb = tx_buf->skb;
5443 			int k, last;
5444 
5445 			if (!skb) {
5446 				j = BNX2_NEXT_TX_BD(j);
5447 				continue;
5448 			}
5449 
5450 			dma_unmap_single(&bp->pdev->dev,
5451 					 dma_unmap_addr(tx_buf, mapping),
5452 					 skb_headlen(skb),
5453 					 DMA_TO_DEVICE);
5454 
5455 			tx_buf->skb = NULL;
5456 
5457 			last = tx_buf->nr_frags;
5458 			j = BNX2_NEXT_TX_BD(j);
5459 			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5460 				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5461 				dma_unmap_page(&bp->pdev->dev,
5462 					dma_unmap_addr(tx_buf, mapping),
5463 					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5464 					DMA_TO_DEVICE);
5465 			}
5466 			dev_kfree_skb(skb);
5467 		}
5468 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5469 	}
5470 }
5471 
5472 static void
5473 bnx2_free_rx_skbs(struct bnx2 *bp)
5474 {
5475 	int i;
5476 
5477 	for (i = 0; i < bp->num_rx_rings; i++) {
5478 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5479 		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5480 		int j;
5481 
5482 		if (!rxr->rx_buf_ring)
5483 			return;
5484 
5485 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5486 			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5487 			u8 *data = rx_buf->data;
5488 
5489 			if (!data)
5490 				continue;
5491 
5492 			dma_unmap_single(&bp->pdev->dev,
5493 					 dma_unmap_addr(rx_buf, mapping),
5494 					 bp->rx_buf_use_size,
5495 					 DMA_FROM_DEVICE);
5496 
5497 			rx_buf->data = NULL;
5498 
5499 			kfree(data);
5500 		}
5501 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5502 			bnx2_free_rx_page(bp, rxr, j);
5503 	}
5504 }
5505 
5506 static void
5507 bnx2_free_skbs(struct bnx2 *bp)
5508 {
5509 	bnx2_free_tx_skbs(bp);
5510 	bnx2_free_rx_skbs(bp);
5511 }
5512 
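/* Full reset: bring the chip down, drop all queued buffers, then
 * re-initialize the chip and all rings.
 */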
5513 static int
5514 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5515 {
5516 	int rc;
5517 
5518 	rc = bnx2_reset_chip(bp, reset_code);
5519 	bnx2_free_skbs(bp);
5520 	if (rc)
5521 		return rc;
5522 
5523 	if ((rc = bnx2_init_chip(bp)) != 0)
5524 		return rc;
5525 
5526 	bnx2_init_all_rings(bp);
5527 	return 0;
5528 }
5529 
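/* Reset the NIC, then bring the PHY and link back up under phy_lock. */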
5530 static int
5531 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5532 {
5533 	int rc;
5534 
5535 	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5536 		return rc;
5537 
5538 	spin_lock_bh(&bp->phy_lock);
5539 	bnx2_init_phy(bp, reset_phy);
5540 	bnx2_set_link(bp);
5541 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5542 		bnx2_remote_phy_event(bp);
5543 	spin_unlock_bh(&bp->phy_lock);
5544 	return 0;
5545 }
5546 
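/* Pick the unload/suspend reset code based on the WoL configuration and
 * hand it to bnx2_reset_chip().
 */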
5547 static int
5548 bnx2_shutdown_chip(struct bnx2 *bp)
5549 {
5550 	u32 reset_code;
5551 
5552 	if (bp->flags & BNX2_FLAG_NO_WOL)
5553 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5554 	else if (bp->wol)
5555 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5556 	else
5557 		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5558 
5559 	return bnx2_reset_chip(bp, reset_code);
5560 }
5561 
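/* Ethtool register self-test: for each table entry, write 0 and then
 * 0xffffffff, checking that read/write bits (rw_mask) take the written
 * value and read-only bits (ro_mask) are preserved; the original value
 * is restored afterwards.
 */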
5562 static int
5563 bnx2_test_registers(struct bnx2 *bp)
5564 {
5565 	int ret;
5566 	int i, is_5709;
5567 	static const struct {
5568 		u16   offset;
5569 		u16   flags;
5570 #define BNX2_FL_NOT_5709	1
5571 		u32   rw_mask;
5572 		u32   ro_mask;
5573 	} reg_tbl[] = {
5574 		{ 0x006c, 0, 0x00000000, 0x0000003f },
5575 		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5576 		{ 0x0094, 0, 0x00000000, 0x00000000 },
5577 
5578 		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5579 		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5580 		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5581 		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5582 		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5583 		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5584 		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5585 		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5586 		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5587 
5588 		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5589 		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5590 		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5591 		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5592 		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5593 		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5594 
5595 		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5596 		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5597 		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5598 
5599 		{ 0x1000, 0, 0x00000000, 0x00000001 },
5600 		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5601 
5602 		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5603 		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5604 		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5605 		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5606 		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5607 		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5608 		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5609 		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5610 		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5611 		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5612 
5613 		{ 0x1800, 0, 0x00000000, 0x00000001 },
5614 		{ 0x1804, 0, 0x00000000, 0x00000003 },
5615 
5616 		{ 0x2800, 0, 0x00000000, 0x00000001 },
5617 		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5618 		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5619 		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5620 		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5621 		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5622 		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5623 		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5624 		{ 0x2840, 0, 0x00000000, 0xffffffff },
5625 		{ 0x2844, 0, 0x00000000, 0xffffffff },
5626 		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5627 		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5628 
5629 		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5630 		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5631 
5632 		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5633 		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5634 		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5635 		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5636 		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5637 		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5638 		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5639 		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5640 		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5641 
5642 		{ 0x5004, 0, 0x00000000, 0x0000007f },
5643 		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5644 
5645 		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5646 		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5647 		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5648 		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5649 		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5650 		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5651 		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5652 		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5653 		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5654 
5655 		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5656 		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5657 		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5658 		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5659 		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5660 		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5661 		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5662 		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5663 		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5664 		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5665 		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5666 		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5667 		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5668 		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5669 		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5670 		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5671 		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5672 		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5673 		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5674 		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5675 		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5676 		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5677 		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5678 
5679 		{ 0xffff, 0, 0x00000000, 0x00000000 },
5680 	};
5681 
5682 	ret = 0;
5683 	is_5709 = 0;
5684 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5685 		is_5709 = 1;
5686 
5687 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5688 		u32 offset, rw_mask, ro_mask, save_val, val;
5689 		u16 flags = reg_tbl[i].flags;
5690 
5691 		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5692 			continue;
5693 
5694 		offset = (u32) reg_tbl[i].offset;
5695 		rw_mask = reg_tbl[i].rw_mask;
5696 		ro_mask = reg_tbl[i].ro_mask;
5697 
5698 		save_val = readl(bp->regview + offset);
5699 
5700 		writel(0, bp->regview + offset);
5701 
5702 		val = readl(bp->regview + offset);
5703 		if ((val & rw_mask) != 0) {
5704 			goto reg_test_err;
5705 		}
5706 
5707 		if ((val & ro_mask) != (save_val & ro_mask)) {
5708 			goto reg_test_err;
5709 		}
5710 
5711 		writel(0xffffffff, bp->regview + offset);
5712 
5713 		val = readl(bp->regview + offset);
5714 		if ((val & rw_mask) != rw_mask) {
5715 			goto reg_test_err;
5716 		}
5717 
5718 		if ((val & ro_mask) != (save_val & ro_mask)) {
5719 			goto reg_test_err;
5720 		}
5721 
5722 		writel(save_val, bp->regview + offset);
5723 		continue;
5724 
5725 reg_test_err:
5726 		writel(save_val, bp->regview + offset);
5727 		ret = -ENODEV;
5728 		break;
5729 	}
5730 	return ret;
5731 }
5732 
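/* Write each test pattern across the given memory region via indirect
 * register access and verify the read-back value.
 */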
5733 static int
5734 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5735 {
5736 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5737 		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5738 	int i;
5739 
5740 	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5741 		u32 offset;
5742 
5743 		for (offset = 0; offset < size; offset += 4) {
5744 
5745 			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5746 
5747 			if (bnx2_reg_rd_ind(bp, start + offset) !=
5748 				test_pattern[i]) {
5749 				return -ENODEV;
5750 			}
5751 		}
5752 	}
5753 	return 0;
5754 }
5755 
5756 static int
5757 bnx2_test_memory(struct bnx2 *bp)
5758 {
5759 	int ret = 0;
5760 	int i;
5761 	static struct mem_entry {
5762 		u32   offset;
5763 		u32   len;
5764 	} mem_tbl_5706[] = {
5765 		{ 0x60000,  0x4000 },
5766 		{ 0xa0000,  0x3000 },
5767 		{ 0xe0000,  0x4000 },
5768 		{ 0x120000, 0x4000 },
5769 		{ 0x1a0000, 0x4000 },
5770 		{ 0x160000, 0x4000 },
5771 		{ 0xffffffff, 0    },
5772 	},
5773 	mem_tbl_5709[] = {
5774 		{ 0x60000,  0x4000 },
5775 		{ 0xa0000,  0x3000 },
5776 		{ 0xe0000,  0x4000 },
5777 		{ 0x120000, 0x4000 },
5778 		{ 0x1a0000, 0x4000 },
5779 		{ 0xffffffff, 0    },
5780 	};
5781 	struct mem_entry *mem_tbl;
5782 
5783 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5784 		mem_tbl = mem_tbl_5709;
5785 	else
5786 		mem_tbl = mem_tbl_5706;
5787 
5788 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5789 		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5790 			mem_tbl[i].len)) != 0) {
5791 			return ret;
5792 		}
5793 	}
5794 
5795 	return ret;
5796 }
5797 
5798 #define BNX2_MAC_LOOPBACK	0
5799 #define BNX2_PHY_LOOPBACK	1
5800 
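/* Send one self-addressed test frame with MAC or PHY loopback enabled
 * and verify that it comes back intact on the rx ring.
 */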
5801 static int
5802 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5803 {
5804 	unsigned int pkt_size, num_pkts, i;
5805 	struct sk_buff *skb;
5806 	u8 *data;
5807 	unsigned char *packet;
5808 	u16 rx_start_idx, rx_idx;
5809 	dma_addr_t map;
5810 	struct bnx2_tx_bd *txbd;
5811 	struct bnx2_sw_bd *rx_buf;
5812 	struct l2_fhdr *rx_hdr;
5813 	int ret = -ENODEV;
5814 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5815 	struct bnx2_tx_ring_info *txr;
5816 	struct bnx2_rx_ring_info *rxr;
5817 
5818 	tx_napi = bnapi;
5819 
5820 	txr = &tx_napi->tx_ring;
5821 	rxr = &bnapi->rx_ring;
5822 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5823 		bp->loopback = MAC_LOOPBACK;
5824 		bnx2_set_mac_loopback(bp);
5825 	}
5826 	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5827 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5828 			return 0;
5829 
5830 		bp->loopback = PHY_LOOPBACK;
5831 		bnx2_set_phy_loopback(bp);
5832 	}
5833 	else
5834 		return -EINVAL;
5835 
5836 	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5837 	skb = netdev_alloc_skb(bp->dev, pkt_size);
5838 	if (!skb)
5839 		return -ENOMEM;
5840 	packet = skb_put(skb, pkt_size);
5841 	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5842 	memset(packet + ETH_ALEN, 0x0, 8);
5843 	for (i = 14; i < pkt_size; i++)
5844 		packet[i] = (unsigned char) (i & 0xff);
5845 
5846 	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5847 			     DMA_TO_DEVICE);
5848 	if (dma_mapping_error(&bp->pdev->dev, map)) {
5849 		dev_kfree_skb(skb);
5850 		return -EIO;
5851 	}
5852 
5853 	BNX2_WR(bp, BNX2_HC_COMMAND,
5854 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5855 
5856 	BNX2_RD(bp, BNX2_HC_COMMAND);
5857 
5858 	udelay(5);
5859 	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5860 
5861 	num_pkts = 0;
5862 
5863 	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5864 
5865 	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5866 	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5867 	txbd->tx_bd_mss_nbytes = pkt_size;
5868 	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5869 
5870 	num_pkts++;
5871 	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5872 	txr->tx_prod_bseq += pkt_size;
5873 
5874 	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5875 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5876 
5877 	udelay(100);
5878 
5879 	BNX2_WR(bp, BNX2_HC_COMMAND,
5880 		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5881 
5882 	BNX2_RD(bp, BNX2_HC_COMMAND);
5883 
5884 	udelay(5);
5885 
5886 	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
5887 	dev_kfree_skb(skb);
5888 
5889 	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5890 		goto loopback_test_done;
5891 
5892 	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5893 	if (rx_idx != rx_start_idx + num_pkts) {
5894 		goto loopback_test_done;
5895 	}
5896 
5897 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5898 	data = rx_buf->data;
5899 
5900 	rx_hdr = get_l2_fhdr(data);
5901 	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5902 
5903 	dma_sync_single_for_cpu(&bp->pdev->dev,
5904 		dma_unmap_addr(rx_buf, mapping),
5905 		bp->rx_buf_use_size, DMA_FROM_DEVICE);
5906 
5907 	if (rx_hdr->l2_fhdr_status &
5908 		(L2_FHDR_ERRORS_BAD_CRC |
5909 		L2_FHDR_ERRORS_PHY_DECODE |
5910 		L2_FHDR_ERRORS_ALIGNMENT |
5911 		L2_FHDR_ERRORS_TOO_SHORT |
5912 		L2_FHDR_ERRORS_GIANT_FRAME)) {
5913 
5914 		goto loopback_test_done;
5915 	}
5916 
5917 	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5918 		goto loopback_test_done;
5919 	}
5920 
5921 	for (i = 14; i < pkt_size; i++) {
5922 		if (*(data + i) != (unsigned char) (i & 0xff)) {
5923 			goto loopback_test_done;
5924 		}
5925 	}
5926 
5927 	ret = 0;
5928 
5929 loopback_test_done:
5930 	bp->loopback = 0;
5931 	return ret;
5932 }
5933 
5934 #define BNX2_MAC_LOOPBACK_FAILED	1
5935 #define BNX2_PHY_LOOPBACK_FAILED	2
5936 #define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5937 					 BNX2_PHY_LOOPBACK_FAILED)
5938 
5939 static int
5940 bnx2_test_loopback(struct bnx2 *bp)
5941 {
5942 	int rc = 0;
5943 
5944 	if (!netif_running(bp->dev))
5945 		return BNX2_LOOPBACK_FAILED;
5946 
5947 	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5948 	spin_lock_bh(&bp->phy_lock);
5949 	bnx2_init_phy(bp, 1);
5950 	spin_unlock_bh(&bp->phy_lock);
5951 	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5952 		rc |= BNX2_MAC_LOOPBACK_FAILED;
5953 	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5954 		rc |= BNX2_PHY_LOOPBACK_FAILED;
5955 	return rc;
5956 }
5957 
5958 #define NVRAM_SIZE 0x200
5959 #define CRC32_RESIDUAL 0xdebb20e3
5960 
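/* NVRAM self-test: check the magic value at offset 0, then verify the
 * CRC32 residual over each 256-byte half of the block read at 0x100.
 */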
5961 static int
5962 bnx2_test_nvram(struct bnx2 *bp)
5963 {
5964 	__be32 buf[NVRAM_SIZE / 4];
5965 	u8 *data = (u8 *) buf;
5966 	int rc = 0;
5967 	u32 magic, csum;
5968 
5969 	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5970 		goto test_nvram_done;
5971 
5972 	magic = be32_to_cpu(buf[0]);
5973 	if (magic != 0x669955aa) {
5974 		rc = -ENODEV;
5975 		goto test_nvram_done;
5976 	}
5977 
5978 	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5979 		goto test_nvram_done;
5980 
5981 	csum = ether_crc_le(0x100, data);
5982 	if (csum != CRC32_RESIDUAL) {
5983 		rc = -ENODEV;
5984 		goto test_nvram_done;
5985 	}
5986 
5987 	csum = ether_crc_le(0x100, data + 0x100);
5988 	if (csum != CRC32_RESIDUAL) {
5989 		rc = -ENODEV;
5990 	}
5991 
5992 test_nvram_done:
5993 	return rc;
5994 }
5995 
5996 static int
5997 bnx2_test_link(struct bnx2 *bp)
5998 {
5999 	u32 bmsr;
6000 
6001 	if (!netif_running(bp->dev))
6002 		return -ENODEV;
6003 
6004 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6005 		if (bp->link_up)
6006 			return 0;
6007 		return -ENODEV;
6008 	}
6009 	spin_lock_bh(&bp->phy_lock);
6010 	bnx2_enable_bmsr1(bp);
6011 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6012 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6013 	bnx2_disable_bmsr1(bp);
6014 	spin_unlock_bh(&bp->phy_lock);
6015 
6016 	if (bmsr & BMSR_LSTATUS) {
6017 		return 0;
6018 	}
6019 	return -ENODEV;
6020 }
6021 
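/* Interrupt self-test: force an interrupt with COAL_NOW and poll for up
 * to ~100ms for the status index to advance.
 */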
6022 static int
6023 bnx2_test_intr(struct bnx2 *bp)
6024 {
6025 	int i;
6026 	u16 status_idx;
6027 
6028 	if (!netif_running(bp->dev))
6029 		return -ENODEV;
6030 
6031 	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6032 
6033 	/* This register is not touched during run-time. */
6034 	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6035 	BNX2_RD(bp, BNX2_HC_COMMAND);
6036 
6037 	for (i = 0; i < 10; i++) {
6038 		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6039 			status_idx) {
6040 
6041 			break;
6042 		}
6043 
6044 		msleep_interruptible(10);
6045 	}
6046 	if (i < 10)
6047 		return 0;
6048 
6049 	return -ENODEV;
6050 }
6051 
6052 /* Determine link status for parallel detection. */
6053 static int
6054 bnx2_5706_serdes_has_link(struct bnx2 *bp)
6055 {
6056 	u32 mode_ctl, an_dbg, exp;
6057 
6058 	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6059 		return 0;
6060 
6061 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6062 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6063 
6064 	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6065 		return 0;
6066 
6067 	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6068 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6069 	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6070 
6071 	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6072 		return 0;
6073 
6074 	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6075 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6076 	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6077 
6078 	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6079 		return 0;
6080 
6081 	return 1;
6082 }
6083 
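/* Periodic SerDes state machine for the 5706: uses parallel detection
 * to force 1Gb full duplex when the link partner does not autoneg,
 * restores autoneg when it does, and forces the link down when rx sync
 * is lost.
 */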
6084 static void
6085 bnx2_5706_serdes_timer(struct bnx2 *bp)
6086 {
6087 	int check_link = 1;
6088 
6089 	spin_lock(&bp->phy_lock);
6090 	if (bp->serdes_an_pending) {
6091 		bp->serdes_an_pending--;
6092 		check_link = 0;
6093 	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6094 		u32 bmcr;
6095 
6096 		bp->current_interval = BNX2_TIMER_INTERVAL;
6097 
6098 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6099 
6100 		if (bmcr & BMCR_ANENABLE) {
6101 			if (bnx2_5706_serdes_has_link(bp)) {
6102 				bmcr &= ~BMCR_ANENABLE;
6103 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6104 				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6105 				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6106 			}
6107 		}
6108 	}
6109 	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6110 		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6111 		u32 phy2;
6112 
6113 		bnx2_write_phy(bp, 0x17, 0x0f01);
6114 		bnx2_read_phy(bp, 0x15, &phy2);
6115 		if (phy2 & 0x20) {
6116 			u32 bmcr;
6117 
6118 			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6119 			bmcr |= BMCR_ANENABLE;
6120 			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6121 
6122 			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6123 		}
6124 	} else
6125 		bp->current_interval = BNX2_TIMER_INTERVAL;
6126 
6127 	if (check_link) {
6128 		u32 val;
6129 
6130 		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6131 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6132 		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6133 
6134 		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6135 			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6136 				bnx2_5706s_force_link_dn(bp, 1);
6137 				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6138 			} else
6139 				bnx2_set_link(bp);
6140 		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6141 			bnx2_set_link(bp);
6142 	}
6143 	spin_unlock(&bp->phy_lock);
6144 }
6145 
6146 static void
6147 bnx2_5708_serdes_timer(struct bnx2 *bp)
6148 {
6149 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6150 		return;
6151 
6152 	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6153 		bp->serdes_an_pending = 0;
6154 		return;
6155 	}
6156 
6157 	spin_lock(&bp->phy_lock);
6158 	if (bp->serdes_an_pending)
6159 		bp->serdes_an_pending--;
6160 	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6161 		u32 bmcr;
6162 
6163 		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6164 		if (bmcr & BMCR_ANENABLE) {
6165 			bnx2_enable_forced_2g5(bp);
6166 			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6167 		} else {
6168 			bnx2_disable_forced_2g5(bp);
6169 			bp->serdes_an_pending = 2;
6170 			bp->current_interval = BNX2_TIMER_INTERVAL;
6171 		}
6172 
6173 	} else
6174 		bp->current_interval = BNX2_TIMER_INTERVAL;
6175 
6176 	spin_unlock(&bp->phy_lock);
6177 }
6178 
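/* Main driver timer: checks for missed MSIs, sends the heartbeat to the
 * firmware, refreshes the firmware rx-drop counter, and runs the
 * per-chip SerDes state machines before re-arming itself.
 */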
6179 static void
6180 bnx2_timer(struct timer_list *t)
6181 {
6182 	struct bnx2 *bp = from_timer(bp, t, timer);
6183 
6184 	if (!netif_running(bp->dev))
6185 		return;
6186 
6187 	if (atomic_read(&bp->intr_sem) != 0)
6188 		goto bnx2_restart_timer;
6189 
6190 	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6191 	     BNX2_FLAG_USING_MSI)
6192 		bnx2_chk_missed_msi(bp);
6193 
6194 	bnx2_send_heart_beat(bp);
6195 
6196 	bp->stats_blk->stat_FwRxDrop =
6197 		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6198 
6199 	/* Work around occasional corrupted counters */
6200 	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6201 		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6202 			BNX2_HC_COMMAND_STATS_NOW);
6203 
6204 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6205 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6206 			bnx2_5706_serdes_timer(bp);
6207 		else
6208 			bnx2_5708_serdes_timer(bp);
6209 	}
6210 
6211 bnx2_restart_timer:
6212 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6213 }
6214 
6215 static int
6216 bnx2_request_irq(struct bnx2 *bp)
6217 {
6218 	unsigned long flags;
6219 	struct bnx2_irq *irq;
6220 	int rc = 0, i;
6221 
6222 	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6223 		flags = 0;
6224 	else
6225 		flags = IRQF_SHARED;
6226 
6227 	for (i = 0; i < bp->irq_nvecs; i++) {
6228 		irq = &bp->irq_tbl[i];
6229 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6230 				 &bp->bnx2_napi[i]);
6231 		if (rc)
6232 			break;
6233 		irq->requested = 1;
6234 	}
6235 	return rc;
6236 }
6237 
6238 static void
6239 __bnx2_free_irq(struct bnx2 *bp)
6240 {
6241 	struct bnx2_irq *irq;
6242 	int i;
6243 
6244 	for (i = 0; i < bp->irq_nvecs; i++) {
6245 		irq = &bp->irq_tbl[i];
6246 		if (irq->requested)
6247 			free_irq(irq->vector, &bp->bnx2_napi[i]);
6248 		irq->requested = 0;
6249 	}
6250 }
6251 
6252 static void
6253 bnx2_free_irq(struct bnx2 *bp)
6254 {
6255 
6256 	__bnx2_free_irq(bp);
6257 	if (bp->flags & BNX2_FLAG_USING_MSI)
6258 		pci_disable_msi(bp->pdev);
6259 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6260 		pci_disable_msix(bp->pdev);
6261 
6262 	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6263 }
6264 
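/* Program the MSI-X table/PBA windows and request between
 * BNX2_MIN_MSIX_VEC and msix_vecs vectors (plus one for CNIC when
 * enabled), naming each vector after the netdev.
 */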
6265 static void
6266 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6267 {
6268 	int i, total_vecs;
6269 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6270 	struct net_device *dev = bp->dev;
6271 	const int len = sizeof(bp->irq_tbl[0].name);
6272 
6273 	bnx2_setup_msix_tbl(bp);
6274 	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6275 	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6276 	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6277 
6278 	/* Need to flush the previous three writes to ensure MSI-X
6279 	 * is set up properly */
6280 	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6281 
6282 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6283 		msix_ent[i].entry = i;
6284 		msix_ent[i].vector = 0;
6285 	}
6286 
6287 	total_vecs = msix_vecs;
6288 #ifdef BCM_CNIC
6289 	total_vecs++;
6290 #endif
6291 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6292 					   BNX2_MIN_MSIX_VEC, total_vecs);
6293 	if (total_vecs < 0)
6294 		return;
6295 
6296 	msix_vecs = total_vecs;
6297 #ifdef BCM_CNIC
6298 	msix_vecs--;
6299 #endif
6300 	bp->irq_nvecs = msix_vecs;
6301 	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6302 	for (i = 0; i < total_vecs; i++) {
6303 		bp->irq_tbl[i].vector = msix_ent[i].vector;
6304 		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6305 		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6306 	}
6307 }
6308 
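/* Choose MSI-X, MSI, or INTx based on hardware capability and the
 * disable_msi module parameter, then size the tx/rx ring counts to the
 * number of vectors actually obtained.
 */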
6309 static int
6310 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6311 {
6312 	int cpus = netif_get_num_default_rss_queues();
6313 	int msix_vecs;
6314 
6315 	if (!bp->num_req_rx_rings)
6316 		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6317 	else if (!bp->num_req_tx_rings)
6318 		msix_vecs = max(cpus, bp->num_req_rx_rings);
6319 	else
6320 		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6321 
6322 	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6323 
6324 	bp->irq_tbl[0].handler = bnx2_interrupt;
6325 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6326 	bp->irq_nvecs = 1;
6327 	bp->irq_tbl[0].vector = bp->pdev->irq;
6328 
6329 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6330 		bnx2_enable_msix(bp, msix_vecs);
6331 
6332 	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6333 	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6334 		if (pci_enable_msi(bp->pdev) == 0) {
6335 			bp->flags |= BNX2_FLAG_USING_MSI;
6336 			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6337 				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6338 				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6339 			} else
6340 				bp->irq_tbl[0].handler = bnx2_msi;
6341 
6342 			bp->irq_tbl[0].vector = bp->pdev->irq;
6343 		}
6344 	}
6345 
6346 	if (!bp->num_req_tx_rings)
6347 		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6348 	else
6349 		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6350 
6351 	if (!bp->num_req_rx_rings)
6352 		bp->num_rx_rings = bp->irq_nvecs;
6353 	else
6354 		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6355 
6356 	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6357 
6358 	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6359 }
6360 
6361 /* Called with rtnl_lock */
6362 static int
6363 bnx2_open(struct net_device *dev)
6364 {
6365 	struct bnx2 *bp = netdev_priv(dev);
6366 	int rc;
6367 
6368 	rc = bnx2_request_firmware(bp);
6369 	if (rc < 0)
6370 		goto out;
6371 
6372 	netif_carrier_off(dev);
6373 
6374 	bnx2_disable_int(bp);
6375 
6376 	rc = bnx2_setup_int_mode(bp, disable_msi);
6377 	if (rc)
6378 		goto open_err;
6379 	bnx2_init_napi(bp);
6380 	bnx2_napi_enable(bp);
6381 	rc = bnx2_alloc_mem(bp);
6382 	if (rc)
6383 		goto open_err;
6384 
6385 	rc = bnx2_request_irq(bp);
6386 	if (rc)
6387 		goto open_err;
6388 
6389 	rc = bnx2_init_nic(bp, 1);
6390 	if (rc)
6391 		goto open_err;
6392 
6393 	mod_timer(&bp->timer, jiffies + bp->current_interval);
6394 
6395 	atomic_set(&bp->intr_sem, 0);
6396 
6397 	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6398 
6399 	bnx2_enable_int(bp);
6400 
6401 	if (bp->flags & BNX2_FLAG_USING_MSI) {
6402 		/* Test MSI to make sure it is working.
6403 		 * If the MSI test fails, fall back to INTx mode.
6404 		 */
6405 		if (bnx2_test_intr(bp) != 0) {
6406 			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6407 
6408 			bnx2_disable_int(bp);
6409 			bnx2_free_irq(bp);
6410 
6411 			bnx2_setup_int_mode(bp, 1);
6412 
6413 			rc = bnx2_init_nic(bp, 0);
6414 
6415 			if (!rc)
6416 				rc = bnx2_request_irq(bp);
6417 
6418 			if (rc) {
6419 				del_timer_sync(&bp->timer);
6420 				goto open_err;
6421 			}
6422 			bnx2_enable_int(bp);
6423 		}
6424 	}
6425 	if (bp->flags & BNX2_FLAG_USING_MSI)
6426 		netdev_info(dev, "using MSI\n");
6427 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6428 		netdev_info(dev, "using MSIX\n");
6429 
6430 	netif_tx_start_all_queues(dev);
6431 out:
6432 	return rc;
6433 
6434 open_err:
6435 	bnx2_napi_disable(bp);
6436 	bnx2_free_skbs(bp);
6437 	bnx2_free_irq(bp);
6438 	bnx2_free_mem(bp);
6439 	bnx2_del_napi(bp);
6440 	bnx2_release_firmware(bp);
6441 	goto out;
6442 }
6443 
6444 static void
6445 bnx2_reset_task(struct work_struct *work)
6446 {
6447 	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6448 	int rc;
6449 	u16 pcicmd;
6450 
6451 	rtnl_lock();
6452 	if (!netif_running(bp->dev)) {
6453 		rtnl_unlock();
6454 		return;
6455 	}
6456 
6457 	bnx2_netif_stop(bp, true);
6458 
6459 	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6460 	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6461 		/* in case the PCI block has been reset */
6462 		pci_restore_state(bp->pdev);
6463 		pci_save_state(bp->pdev);
6464 	}
6465 	rc = bnx2_init_nic(bp, 1);
6466 	if (rc) {
6467 		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6468 		bnx2_napi_enable(bp);
6469 		dev_close(bp->dev);
6470 		rtnl_unlock();
6471 		return;
6472 	}
6473 
6474 	atomic_set(&bp->intr_sem, 1);
6475 	bnx2_netif_start(bp, true);
6476 	rtnl_unlock();
6477 }
6478 
6479 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6480 
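/* Dump the flow-through queue control registers, the CPU states, and
 * the TBDC CAM contents to the log for tx-timeout debugging.
 */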
6481 static void
6482 bnx2_dump_ftq(struct bnx2 *bp)
6483 {
6484 	int i;
6485 	u32 reg, bdidx, cid, valid;
6486 	struct net_device *dev = bp->dev;
6487 	static const struct ftq_reg {
6488 		char *name;
6489 		u32 off;
6490 	} ftq_arr[] = {
6491 		BNX2_FTQ_ENTRY(RV2P_P),
6492 		BNX2_FTQ_ENTRY(RV2P_T),
6493 		BNX2_FTQ_ENTRY(RV2P_M),
6494 		BNX2_FTQ_ENTRY(TBDR_),
6495 		BNX2_FTQ_ENTRY(TDMA_),
6496 		BNX2_FTQ_ENTRY(TXP_),
6497 		BNX2_FTQ_ENTRY(TXP_),
6498 		BNX2_FTQ_ENTRY(TPAT_),
6499 		BNX2_FTQ_ENTRY(RXP_C),
6500 		BNX2_FTQ_ENTRY(RXP_),
6501 		BNX2_FTQ_ENTRY(COM_COMXQ_),
6502 		BNX2_FTQ_ENTRY(COM_COMTQ_),
6503 		BNX2_FTQ_ENTRY(COM_COMQ_),
6504 		BNX2_FTQ_ENTRY(CP_CPQ_),
6505 	};
6506 
6507 	netdev_err(dev, "<--- start FTQ dump --->\n");
6508 	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6509 		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6510 			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6511 
6512 	netdev_err(dev, "CPU states:\n");
6513 	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6514 		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6515 			   reg, bnx2_reg_rd_ind(bp, reg),
6516 			   bnx2_reg_rd_ind(bp, reg + 4),
6517 			   bnx2_reg_rd_ind(bp, reg + 8),
6518 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6519 			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6520 			   bnx2_reg_rd_ind(bp, reg + 0x20));
6521 
6522 	netdev_err(dev, "<--- end FTQ dump --->\n");
6523 	netdev_err(dev, "<--- start TBDC dump --->\n");
6524 	netdev_err(dev, "TBDC free cnt: %ld\n",
6525 		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6526 	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6527 	for (i = 0; i < 0x20; i++) {
6528 		int j = 0;
6529 
6530 		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6531 		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6532 			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6533 		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6534 		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6535 			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6536 			j++;
6537 
6538 		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6539 		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6540 		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6541 		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6542 			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6543 			   bdidx >> 24, (valid >> 8) & 0x0ff);
6544 	}
6545 	netdev_err(dev, "<--- end TBDC dump --->\n");
6546 }
6547 
6548 static void
6549 bnx2_dump_state(struct bnx2 *bp)
6550 {
6551 	struct net_device *dev = bp->dev;
6552 	u32 val1, val2;
6553 
6554 	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6555 	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6556 		   atomic_read(&bp->intr_sem), val1);
6557 	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6558 	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6559 	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6560 	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6561 		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6562 		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6563 	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6564 		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6565 	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6566 		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6567 	if (bp->flags & BNX2_FLAG_USING_MSIX)
6568 		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6569 			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6570 }
6571 
6572 static void
6573 bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue)
6574 {
6575 	struct bnx2 *bp = netdev_priv(dev);
6576 
6577 	bnx2_dump_ftq(bp);
6578 	bnx2_dump_state(bp);
6579 	bnx2_dump_mcp_state(bp);
6580 
6581 	/* This allows the netif to be shut down gracefully before resetting */
6582 	schedule_work(&bp->reset_task);
6583 }
6584 
6585 /* Called with netif_tx_lock.
6586  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6587  * netif_wake_queue().
6588  */
6589 static netdev_tx_t
6590 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6591 {
6592 	struct bnx2 *bp = netdev_priv(dev);
6593 	dma_addr_t mapping;
6594 	struct bnx2_tx_bd *txbd;
6595 	struct bnx2_sw_tx_bd *tx_buf;
6596 	u32 len, vlan_tag_flags, last_frag, mss;
6597 	u16 prod, ring_prod;
6598 	int i;
6599 	struct bnx2_napi *bnapi;
6600 	struct bnx2_tx_ring_info *txr;
6601 	struct netdev_queue *txq;
6602 
6603 	/* Determine which tx ring this skb will be placed on */
6604 	i = skb_get_queue_mapping(skb);
6605 	bnapi = &bp->bnx2_napi[i];
6606 	txr = &bnapi->tx_ring;
6607 	txq = netdev_get_tx_queue(dev, i);
6608 
6609 	if (unlikely(bnx2_tx_avail(bp, txr) <
6610 	    (skb_shinfo(skb)->nr_frags + 1))) {
6611 		netif_tx_stop_queue(txq);
6612 		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6613 
6614 		return NETDEV_TX_BUSY;
6615 	}
6616 	len = skb_headlen(skb);
6617 	prod = txr->tx_prod;
6618 	ring_prod = BNX2_TX_RING_IDX(prod);
6619 
6620 	vlan_tag_flags = 0;
6621 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6622 		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6623 	}
6624 
6625 	if (skb_vlan_tag_present(skb)) {
6626 		vlan_tag_flags |=
6627 			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6628 	}
6629 
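	/* Hardware LSO: encode the TCP/IP header-option lengths (and, for
	 * IPv6, the transport-header offset) into the BD flags so the chip
	 * can segment the packet.
	 */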
6630 	if ((mss = skb_shinfo(skb)->gso_size)) {
6631 		u32 tcp_opt_len;
6632 		struct iphdr *iph;
6633 
6634 		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6635 
6636 		tcp_opt_len = tcp_optlen(skb);
6637 
6638 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6639 			u32 tcp_off = skb_transport_offset(skb) -
6640 				      sizeof(struct ipv6hdr) - ETH_HLEN;
6641 
6642 			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6643 					  TX_BD_FLAGS_SW_FLAGS;
6644 			if (likely(tcp_off == 0))
6645 				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6646 			else {
6647 				tcp_off >>= 3;
6648 				vlan_tag_flags |= ((tcp_off & 0x3) <<
6649 						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6650 						  ((tcp_off & 0x10) <<
6651 						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6652 				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6653 			}
6654 		} else {
6655 			iph = ip_hdr(skb);
6656 			if (tcp_opt_len || (iph->ihl > 5)) {
6657 				vlan_tag_flags |= ((iph->ihl - 5) +
6658 						   (tcp_opt_len >> 2)) << 8;
6659 			}
6660 		}
6661 	} else
6662 		mss = 0;
6663 
6664 	mapping = dma_map_single(&bp->pdev->dev, skb->data, len,
6665 				 DMA_TO_DEVICE);
6666 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6667 		dev_kfree_skb_any(skb);
6668 		return NETDEV_TX_OK;
6669 	}
6670 
6671 	tx_buf = &txr->tx_buf_ring[ring_prod];
6672 	tx_buf->skb = skb;
6673 	dma_unmap_addr_set(tx_buf, mapping, mapping);
6674 
6675 	txbd = &txr->tx_desc_ring[ring_prod];
6676 
6677 	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6678 	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6679 	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6680 	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6681 
6682 	last_frag = skb_shinfo(skb)->nr_frags;
6683 	tx_buf->nr_frags = last_frag;
6684 	tx_buf->is_gso = skb_is_gso(skb);
6685 
6686 	for (i = 0; i < last_frag; i++) {
6687 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6688 
6689 		prod = BNX2_NEXT_TX_BD(prod);
6690 		ring_prod = BNX2_TX_RING_IDX(prod);
6691 		txbd = &txr->tx_desc_ring[ring_prod];
6692 
6693 		len = skb_frag_size(frag);
6694 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6695 					   DMA_TO_DEVICE);
6696 		if (dma_mapping_error(&bp->pdev->dev, mapping))
6697 			goto dma_error;
6698 		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6699 				   mapping);
6700 
6701 		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6702 		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6703 		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6704 		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6705 
6706 	}
6707 	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6708 
6709 	/* Sync BD data before updating TX mailbox */
6710 	wmb();
6711 
6712 	netdev_tx_sent_queue(txq, skb->len);
6713 
6714 	prod = BNX2_NEXT_TX_BD(prod);
6715 	txr->tx_prod_bseq += skb->len;
6716 
6717 	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6718 	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6719 
6720 	txr->tx_prod = prod;
6721 
6722 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6723 		netif_tx_stop_queue(txq);
6724 
6725 		/* netif_tx_stop_queue() must be done before checking
6726 		 * tx index in bnx2_tx_avail() below, because in
6727 		 * bnx2_tx_int(), we update tx index before checking for
6728 		 * netif_tx_queue_stopped().
6729 		 */
6730 		smp_mb();
6731 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6732 			netif_tx_wake_queue(txq);
6733 	}
6734 
6735 	return NETDEV_TX_OK;
6736 dma_error:
6737 	/* save the index of the frag that failed to map */
6738 	last_frag = i;
6739 
6740 	/* start back at beginning and unmap skb */
6741 	prod = txr->tx_prod;
6742 	ring_prod = BNX2_TX_RING_IDX(prod);
6743 	tx_buf = &txr->tx_buf_ring[ring_prod];
6744 	tx_buf->skb = NULL;
6745 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6746 			 skb_headlen(skb), DMA_TO_DEVICE);
6747 
6748 	/* unmap remaining mapped pages */
6749 	for (i = 0; i < last_frag; i++) {
6750 		prod = BNX2_NEXT_TX_BD(prod);
6751 		ring_prod = BNX2_TX_RING_IDX(prod);
6752 		tx_buf = &txr->tx_buf_ring[ring_prod];
6753 		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6754 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6755 			       DMA_TO_DEVICE);
6756 	}
6757 
6758 	dev_kfree_skb_any(skb);
6759 	return NETDEV_TX_OK;
6760 }
6761 
6762 /* Called with rtnl_lock */
6763 static int
6764 bnx2_close(struct net_device *dev)
6765 {
6766 	struct bnx2 *bp = netdev_priv(dev);
6767 
6768 	bnx2_disable_int_sync(bp);
6769 	bnx2_napi_disable(bp);
6770 	netif_tx_disable(dev);
6771 	del_timer_sync(&bp->timer);
6772 	bnx2_shutdown_chip(bp);
6773 	bnx2_free_irq(bp);
6774 	bnx2_free_skbs(bp);
6775 	bnx2_free_mem(bp);
6776 	bnx2_del_napi(bp);
6777 	bp->link_up = 0;
6778 	netif_carrier_off(bp->dev);
6779 	return 0;
6780 }
6781 
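/* Accumulate the hardware statistics block into temp_stats_blk so the
 * counters survive a chip reset; the first 10 counters are 64-bit and
 * need explicit carry handling.
 */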
6782 static void
6783 bnx2_save_stats(struct bnx2 *bp)
6784 {
6785 	u32 *hw_stats = (u32 *) bp->stats_blk;
6786 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6787 	int i;
6788 
6789 	/* The first 10 counters are 64-bit counters */
6790 	for (i = 0; i < 20; i += 2) {
6791 		u32 hi;
6792 		u64 lo;
6793 
6794 		hi = temp_stats[i] + hw_stats[i];
6795 		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6796 		if (lo > 0xffffffff)
6797 			hi++;
6798 		temp_stats[i] = hi;
6799 		temp_stats[i + 1] = lo & 0xffffffff;
6800 	}
6801 
6802 	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6803 		temp_stats[i] += hw_stats[i];
6804 }
6805 
6806 #define GET_64BIT_NET_STATS64(ctr)		\
6807 	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6808 
6809 #define GET_64BIT_NET_STATS(ctr)				\
6810 	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6811 	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6812 
6813 #define GET_32BIT_NET_STATS(ctr)				\
6814 	(unsigned long) (bp->stats_blk->ctr +			\
6815 			 bp->temp_stats_blk->ctr)
6816 
6817 static void
6818 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6819 {
6820 	struct bnx2 *bp = netdev_priv(dev);
6821 
6822 	if (!bp->stats_blk)
6823 		return;
6824 
6825 	net_stats->rx_packets =
6826 		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6827 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6828 		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6829 
6830 	net_stats->tx_packets =
6831 		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6832 		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6833 		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6834 
6835 	net_stats->rx_bytes =
6836 		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6837 
6838 	net_stats->tx_bytes =
6839 		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6840 
6841 	net_stats->multicast =
6842 		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6843 
6844 	net_stats->collisions =
6845 		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6846 
6847 	net_stats->rx_length_errors =
6848 		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6849 		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6850 
6851 	net_stats->rx_over_errors =
6852 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6853 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6854 
6855 	net_stats->rx_frame_errors =
6856 		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6857 
6858 	net_stats->rx_crc_errors =
6859 		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6860 
6861 	net_stats->rx_errors = net_stats->rx_length_errors +
6862 		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6863 		net_stats->rx_crc_errors;
6864 
6865 	net_stats->tx_aborted_errors =
6866 		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6867 		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6868 
6869 	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6870 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6871 		net_stats->tx_carrier_errors = 0;
6872 	else {
6873 		net_stats->tx_carrier_errors =
6874 			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6875 	}
6876 
6877 	net_stats->tx_errors =
6878 		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6879 		net_stats->tx_aborted_errors +
6880 		net_stats->tx_carrier_errors;
6881 
6882 	net_stats->rx_missed_errors =
6883 		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6884 		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6885 		GET_32BIT_NET_STATS(stat_FwRxDrop);
6886 
6887 }
6888 
6889 /* All ethtool functions called with rtnl_lock */
6890 
6891 static int
6892 bnx2_get_link_ksettings(struct net_device *dev,
6893 			struct ethtool_link_ksettings *cmd)
6894 {
6895 	struct bnx2 *bp = netdev_priv(dev);
6896 	int support_serdes = 0, support_copper = 0;
6897 	u32 supported, advertising;
6898 
6899 	supported = SUPPORTED_Autoneg;
6900 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6901 		support_serdes = 1;
6902 		support_copper = 1;
6903 	} else if (bp->phy_port == PORT_FIBRE)
6904 		support_serdes = 1;
6905 	else
6906 		support_copper = 1;
6907 
6908 	if (support_serdes) {
6909 		supported |= SUPPORTED_1000baseT_Full |
6910 			SUPPORTED_FIBRE;
6911 		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6912 			supported |= SUPPORTED_2500baseX_Full;
6913 	}
6914 	if (support_copper) {
6915 		supported |= SUPPORTED_10baseT_Half |
6916 			SUPPORTED_10baseT_Full |
6917 			SUPPORTED_100baseT_Half |
6918 			SUPPORTED_100baseT_Full |
6919 			SUPPORTED_1000baseT_Full |
6920 			SUPPORTED_TP;
6921 	}
6922 
6923 	spin_lock_bh(&bp->phy_lock);
6924 	cmd->base.port = bp->phy_port;
6925 	advertising = bp->advertising;
6926 
6927 	if (bp->autoneg & AUTONEG_SPEED) {
6928 		cmd->base.autoneg = AUTONEG_ENABLE;
6929 	} else {
6930 		cmd->base.autoneg = AUTONEG_DISABLE;
6931 	}
6932 
6933 	if (netif_carrier_ok(dev)) {
6934 		cmd->base.speed = bp->line_speed;
6935 		cmd->base.duplex = bp->duplex;
6936 		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6937 			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6938 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
6939 			else
6940 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
6941 		}
6942 	}
6943 	else {
6944 		cmd->base.speed = SPEED_UNKNOWN;
6945 		cmd->base.duplex = DUPLEX_UNKNOWN;
6946 	}
6947 	spin_unlock_bh(&bp->phy_lock);
6948 
6949 	cmd->base.phy_address = bp->phy_addr;
6950 
6951 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
6952 						supported);
6953 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
6954 						advertising);
6955 
6956 	return 0;
6957 }
6958 
6959 static int
6960 bnx2_set_link_ksettings(struct net_device *dev,
6961 			const struct ethtool_link_ksettings *cmd)
6962 {
6963 	struct bnx2 *bp = netdev_priv(dev);
6964 	u8 autoneg = bp->autoneg;
6965 	u8 req_duplex = bp->req_duplex;
6966 	u16 req_line_speed = bp->req_line_speed;
6967 	u32 advertising = bp->advertising;
6968 	int err = -EINVAL;
6969 
6970 	spin_lock_bh(&bp->phy_lock);
6971 
6972 	if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
6973 		goto err_out_unlock;
6974 
6975 	if (cmd->base.port != bp->phy_port &&
6976 	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6977 		goto err_out_unlock;
6978 
6979 	/* If device is down, we can store the settings only if the user
6980 	 * is setting the currently active port.
6981 	 */
6982 	if (!netif_running(dev) && cmd->base.port != bp->phy_port)
6983 		goto err_out_unlock;
6984 
6985 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
6986 		autoneg |= AUTONEG_SPEED;
6987 
6988 		ethtool_convert_link_mode_to_legacy_u32(
6989 			&advertising, cmd->link_modes.advertising);
6990 
6991 		if (cmd->base.port == PORT_TP) {
6992 			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6993 			if (!advertising)
6994 				advertising = ETHTOOL_ALL_COPPER_SPEED;
6995 		} else {
6996 			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6997 			if (!advertising)
6998 				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6999 		}
7000 		advertising |= ADVERTISED_Autoneg;
7001 	}
7002 	else {
7003 		u32 speed = cmd->base.speed;
7004 
7005 		if (cmd->base.port == PORT_FIBRE) {
7006 			if ((speed != SPEED_1000 &&
7007 			     speed != SPEED_2500) ||
7008 			    (cmd->base.duplex != DUPLEX_FULL))
7009 				goto err_out_unlock;
7010 
7011 			if (speed == SPEED_2500 &&
7012 			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
7013 				goto err_out_unlock;
7014 		} else if (speed == SPEED_1000 || speed == SPEED_2500)
7015 			goto err_out_unlock;
7016 
7017 		autoneg &= ~AUTONEG_SPEED;
7018 		req_line_speed = speed;
7019 		req_duplex = cmd->base.duplex;
7020 		advertising = 0;
7021 	}
7022 
7023 	bp->autoneg = autoneg;
7024 	bp->advertising = advertising;
7025 	bp->req_line_speed = req_line_speed;
7026 	bp->req_duplex = req_duplex;
7027 
7028 	err = 0;
7029 	/* If device is down, the new settings will be picked up when it is
7030 	 * brought up.
7031 	 */
7032 	if (netif_running(dev))
7033 		err = bnx2_setup_phy(bp, cmd->base.port);
7034 
7035 err_out_unlock:
7036 	spin_unlock_bh(&bp->phy_lock);
7037 
7038 	return err;
7039 }
7040 
7041 static void
7042 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7043 {
7044 	struct bnx2 *bp = netdev_priv(dev);
7045 
7046 	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7047 	strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7048 	strscpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7049 }
7050 
7051 #define BNX2_REGDUMP_LEN		(32 * 1024)
7052 
7053 static int
7054 bnx2_get_regs_len(struct net_device *dev)
7055 {
7056 	return BNX2_REGDUMP_LEN;
7057 }
7058 
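/* Dump the readable register ranges into the ethtool buffer; the
 * reg_boundaries pairs delimit the ranges that are safe to read, and
 * everything outside them is left zeroed.
 */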
7059 static void
7060 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7061 {
7062 	u32 *p = _p, i, offset;
7063 	u8 *orig_p = _p;
7064 	struct bnx2 *bp = netdev_priv(dev);
7065 	static const u32 reg_boundaries[] = {
7066 		0x0000, 0x0098, 0x0400, 0x045c,
7067 		0x0800, 0x0880, 0x0c00, 0x0c10,
7068 		0x0c30, 0x0d08, 0x1000, 0x101c,
7069 		0x1040, 0x1048, 0x1080, 0x10a4,
7070 		0x1400, 0x1490, 0x1498, 0x14f0,
7071 		0x1500, 0x155c, 0x1580, 0x15dc,
7072 		0x1600, 0x1658, 0x1680, 0x16d8,
7073 		0x1800, 0x1820, 0x1840, 0x1854,
7074 		0x1880, 0x1894, 0x1900, 0x1984,
7075 		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7076 		0x1c80, 0x1c94, 0x1d00, 0x1d84,
7077 		0x2000, 0x2030, 0x23c0, 0x2400,
7078 		0x2800, 0x2820, 0x2830, 0x2850,
7079 		0x2b40, 0x2c10, 0x2fc0, 0x3058,
7080 		0x3c00, 0x3c94, 0x4000, 0x4010,
7081 		0x4080, 0x4090, 0x43c0, 0x4458,
7082 		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7083 		0x4fc0, 0x5010, 0x53c0, 0x5444,
7084 		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7085 		0x5fc0, 0x6000, 0x6400, 0x6428,
7086 		0x6800, 0x6848, 0x684c, 0x6860,
7087 		0x6888, 0x6910, 0x8000
7088 	};
7089 
7090 	regs->version = 0;
7091 
7092 	memset(p, 0, BNX2_REGDUMP_LEN);
7093 
7094 	if (!netif_running(bp->dev))
7095 		return;
7096 
7097 	i = 0;
7098 	offset = reg_boundaries[0];
7099 	p += offset;
7100 	while (offset < BNX2_REGDUMP_LEN) {
7101 		*p++ = BNX2_RD(bp, offset);
7102 		offset += 4;
7103 		if (offset == reg_boundaries[i + 1]) {
7104 			offset = reg_boundaries[i + 2];
7105 			p = (u32 *) (orig_p + offset);
7106 			i += 2;
7107 		}
7108 	}
7109 }
7110 
7111 static void
7112 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7113 {
7114 	struct bnx2 *bp = netdev_priv(dev);
7115 
7116 	if (bp->flags & BNX2_FLAG_NO_WOL) {
7117 		wol->supported = 0;
7118 		wol->wolopts = 0;
7119 	}
7120 	else {
7121 		wol->supported = WAKE_MAGIC;
7122 		if (bp->wol)
7123 			wol->wolopts = WAKE_MAGIC;
7124 		else
7125 			wol->wolopts = 0;
7126 	}
7127 	memset(&wol->sopass, 0, sizeof(wol->sopass));
7128 }
7129 
7130 static int
7131 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7132 {
7133 	struct bnx2 *bp = netdev_priv(dev);
7134 
7135 	if (wol->wolopts & ~WAKE_MAGIC)
7136 		return -EINVAL;
7137 
7138 	if (wol->wolopts & WAKE_MAGIC) {
7139 		if (bp->flags & BNX2_FLAG_NO_WOL)
7140 			return -EINVAL;
7141 
7142 		bp->wol = 1;
7143 	}
7144 	else {
7145 		bp->wol = 0;
7146 	}
7147 
7148 	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7149 
7150 	return 0;
7151 }
7152 
7153 static int
7154 bnx2_nway_reset(struct net_device *dev)
7155 {
7156 	struct bnx2 *bp = netdev_priv(dev);
7157 	u32 bmcr;
7158 
7159 	if (!netif_running(dev))
7160 		return -EAGAIN;
7161 
7162 	if (!(bp->autoneg & AUTONEG_SPEED)) {
7163 		return -EINVAL;
7164 	}
7165 
7166 	spin_lock_bh(&bp->phy_lock);
7167 
7168 	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7169 		int rc;
7170 
7171 		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7172 		spin_unlock_bh(&bp->phy_lock);
7173 		return rc;
7174 	}
7175 
7176 	/* Force a link down visible on the other side */
7177 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7178 		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7179 		spin_unlock_bh(&bp->phy_lock);
7180 
7181 		msleep(20);
7182 
7183 		spin_lock_bh(&bp->phy_lock);
7184 
7185 		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7186 		bp->serdes_an_pending = 1;
7187 		mod_timer(&bp->timer, jiffies + bp->current_interval);
7188 	}
7189 
7190 	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7191 	bmcr &= ~BMCR_LOOPBACK;
7192 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7193 
7194 	spin_unlock_bh(&bp->phy_lock);
7195 
7196 	return 0;
7197 }
7198 
7199 static u32
7200 bnx2_get_link(struct net_device *dev)
7201 {
7202 	struct bnx2 *bp = netdev_priv(dev);
7203 
7204 	return bp->link_up;
7205 }
7206 
7207 static int
7208 bnx2_get_eeprom_len(struct net_device *dev)
7209 {
7210 	struct bnx2 *bp = netdev_priv(dev);
7211 
7212 	if (!bp->flash_info)
7213 		return 0;
7214 
7215 	return (int) bp->flash_size;
7216 }
7217 
7218 static int
7219 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7220 		u8 *eebuf)
7221 {
7222 	struct bnx2 *bp = netdev_priv(dev);
7223 	int rc;
7224 
7225 	/* parameters already validated in ethtool_get_eeprom */
7226 
7227 	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7228 
7229 	return rc;
7230 }
7231 
7232 static int
7233 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7234 		u8 *eebuf)
7235 {
7236 	struct bnx2 *bp = netdev_priv(dev);
7237 	int rc;
7238 
7239 	/* parameters already validated in ethtool_set_eeprom */
7240 
7241 	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7242 
7243 	return rc;
7244 }
7245 
7246 static int bnx2_get_coalesce(struct net_device *dev,
7247 			     struct ethtool_coalesce *coal,
7248 			     struct kernel_ethtool_coalesce *kernel_coal,
7249 			     struct netlink_ext_ack *extack)
7250 {
7251 	struct bnx2 *bp = netdev_priv(dev);
7252 
7253 	memset(coal, 0, sizeof(struct ethtool_coalesce));
7254 
7255 	coal->rx_coalesce_usecs = bp->rx_ticks;
7256 	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7257 	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7258 	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7259 
7260 	coal->tx_coalesce_usecs = bp->tx_ticks;
7261 	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7262 	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7263 	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7264 
7265 	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7266 
7267 	return 0;
7268 }
7269 
7270 static int bnx2_set_coalesce(struct net_device *dev,
7271 			     struct ethtool_coalesce *coal,
7272 			     struct kernel_ethtool_coalesce *kernel_coal,
7273 			     struct netlink_ext_ack *extack)
7274 {
7275 	struct bnx2 *bp = netdev_priv(dev);
7276 
7277 	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7278 	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7279 
7280 	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7281 	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7282 
7283 	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7284 	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7285 
7286 	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7287 	if (bp->rx_quick_cons_trip_int > 0xff)
7288 		bp->rx_quick_cons_trip_int = 0xff;
7289 
7290 	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7291 	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7292 
7293 	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7294 	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7295 
7296 	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7297 	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7298 
7299 	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7300 	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7301 		0xff;
7302 
7303 	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7304 	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7305 		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7306 			bp->stats_ticks = USEC_PER_SEC;
7307 	}
7308 	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7309 		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7310 	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7311 
7312 	if (netif_running(bp->dev)) {
7313 		bnx2_netif_stop(bp, true);
7314 		bnx2_init_nic(bp, 0);
7315 		bnx2_netif_start(bp, true);
7316 	}
7317 
7318 	return 0;
7319 }
7320 
7321 static void
7322 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
7323 		   struct kernel_ethtool_ringparam *kernel_ering,
7324 		   struct netlink_ext_ack *extack)
7325 {
7326 	struct bnx2 *bp = netdev_priv(dev);
7327 
7328 	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7329 	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7330 
7331 	ering->rx_pending = bp->rx_ring_size;
7332 	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7333 
7334 	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7335 	ering->tx_pending = bp->tx_ring_size;
7336 }
7337 
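/* Resizing the rings is a full teardown and rebuild: quiesce the NIC,
 * free the buffers and descriptor memory, record the new sizes, then
 * reallocate and reinitialize.  With reset_irq set (the set_channels
 * path) the IRQ vectors and NAPI contexts are renegotiated as well.
 * On failure the device is closed rather than left half-initialized.
 */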
7338 static int
7339 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7340 {
7341 	if (netif_running(bp->dev)) {
7342 		/* Reset will erase chipset stats; save them */
7343 		bnx2_save_stats(bp);
7344 
7345 		bnx2_netif_stop(bp, true);
7346 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7347 		if (reset_irq) {
7348 			bnx2_free_irq(bp);
7349 			bnx2_del_napi(bp);
7350 		} else {
7351 			__bnx2_free_irq(bp);
7352 		}
7353 		bnx2_free_skbs(bp);
7354 		bnx2_free_mem(bp);
7355 	}
7356 
7357 	bnx2_set_rx_ring_size(bp, rx);
7358 	bp->tx_ring_size = tx;
7359 
7360 	if (netif_running(bp->dev)) {
7361 		int rc = 0;
7362 
7363 		if (reset_irq) {
7364 			rc = bnx2_setup_int_mode(bp, disable_msi);
7365 			bnx2_init_napi(bp);
7366 		}
7367 
7368 		if (!rc)
7369 			rc = bnx2_alloc_mem(bp);
7370 
7371 		if (!rc)
7372 			rc = bnx2_request_irq(bp);
7373 
7374 		if (!rc)
7375 			rc = bnx2_init_nic(bp, 0);
7376 
7377 		if (rc) {
7378 			bnx2_napi_enable(bp);
7379 			dev_close(bp->dev);
7380 			return rc;
7381 		}
7382 #ifdef BCM_CNIC
7383 		mutex_lock(&bp->cnic_lock);
7384 		/* Let cnic know about the new status block. */
7385 		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7386 			bnx2_setup_cnic_irq_info(bp);
7387 		mutex_unlock(&bp->cnic_lock);
7388 #endif
7389 		bnx2_netif_start(bp, true);
7390 	}
7391 	return 0;
7392 }
7393 
7394 static int
7395 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
7396 		   struct kernel_ethtool_ringparam *kernel_ering,
7397 		   struct netlink_ext_ack *extack)
7398 {
7399 	struct bnx2 *bp = netdev_priv(dev);
7400 	int rc;
7401 
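	/* The TX ring must be able to hold at least one maximally
	 * fragmented skb, hence the requirement that tx_pending exceed
	 * MAX_SKB_FRAGS.
	 */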
7402 	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7403 		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7404 		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7405 
7406 		return -EINVAL;
7407 	}
7408 	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7409 				   false);
7410 	return rc;
7411 }
7412 
7413 static void
7414 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7415 {
7416 	struct bnx2 *bp = netdev_priv(dev);
7417 
7418 	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7419 	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7420 	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7421 }
7422 
7423 static int
7424 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7425 {
7426 	struct bnx2 *bp = netdev_priv(dev);
7427 
7428 	bp->req_flow_ctrl = 0;
7429 	if (epause->rx_pause)
7430 		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7431 	if (epause->tx_pause)
7432 		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7433 
7434 	if (epause->autoneg) {
7435 		bp->autoneg |= AUTONEG_FLOW_CTRL;
7436 	}
7437 	else {
7438 		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7439 	}
7440 
7441 	if (netif_running(dev)) {
7442 		spin_lock_bh(&bp->phy_lock);
7443 		bnx2_setup_phy(bp, bp->phy_port);
7444 		spin_unlock_bh(&bp->phy_lock);
7445 	}
7446 
7447 	return 0;
7448 }
7449 
7450 static struct {
7451 	char string[ETH_GSTRING_LEN];
7452 } bnx2_stats_str_arr[] = {
7453 	{ "rx_bytes" },
7454 	{ "rx_error_bytes" },
7455 	{ "tx_bytes" },
7456 	{ "tx_error_bytes" },
7457 	{ "rx_ucast_packets" },
7458 	{ "rx_mcast_packets" },
7459 	{ "rx_bcast_packets" },
7460 	{ "tx_ucast_packets" },
7461 	{ "tx_mcast_packets" },
7462 	{ "tx_bcast_packets" },
7463 	{ "tx_mac_errors" },
7464 	{ "tx_carrier_errors" },
7465 	{ "rx_crc_errors" },
7466 	{ "rx_align_errors" },
7467 	{ "tx_single_collisions" },
7468 	{ "tx_multi_collisions" },
7469 	{ "tx_deferred" },
7470 	{ "tx_excess_collisions" },
7471 	{ "tx_late_collisions" },
7472 	{ "tx_total_collisions" },
7473 	{ "rx_fragments" },
7474 	{ "rx_jabbers" },
7475 	{ "rx_undersize_packets" },
7476 	{ "rx_oversize_packets" },
7477 	{ "rx_64_byte_packets" },
7478 	{ "rx_65_to_127_byte_packets" },
7479 	{ "rx_128_to_255_byte_packets" },
7480 	{ "rx_256_to_511_byte_packets" },
7481 	{ "rx_512_to_1023_byte_packets" },
7482 	{ "rx_1024_to_1522_byte_packets" },
7483 	{ "rx_1523_to_9022_byte_packets" },
7484 	{ "tx_64_byte_packets" },
7485 	{ "tx_65_to_127_byte_packets" },
7486 	{ "tx_128_to_255_byte_packets" },
7487 	{ "tx_256_to_511_byte_packets" },
7488 	{ "tx_512_to_1023_byte_packets" },
7489 	{ "tx_1024_to_1522_byte_packets" },
7490 	{ "tx_1523_to_9022_byte_packets" },
7491 	{ "rx_xon_frames" },
7492 	{ "rx_xoff_frames" },
7493 	{ "tx_xon_frames" },
7494 	{ "tx_xoff_frames" },
7495 	{ "rx_mac_ctrl_frames" },
7496 	{ "rx_filtered_packets" },
7497 	{ "rx_ftq_discards" },
7498 	{ "rx_discards" },
7499 	{ "rx_fw_discards" },
7500 };
7501 
7502 #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7503 
7504 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7505 
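/* Runs parallel to bnx2_stats_str_arr above: entry i is the 32-bit word
 * offset of counter i within struct statistics_block.
 */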
7506 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7507     STATS_OFFSET32(stat_IfHCInOctets_hi),
7508     STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7509     STATS_OFFSET32(stat_IfHCOutOctets_hi),
7510     STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7511     STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7512     STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7513     STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7514     STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7515     STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7516     STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7517     STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7518     STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7519     STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7520     STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7521     STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7522     STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7523     STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7524     STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7525     STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7526     STATS_OFFSET32(stat_EtherStatsCollisions),
7527     STATS_OFFSET32(stat_EtherStatsFragments),
7528     STATS_OFFSET32(stat_EtherStatsJabbers),
7529     STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7530     STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7531     STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7532     STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7533     STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7534     STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7535     STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7536     STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7537     STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7538     STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7539     STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7540     STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7541     STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7542     STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7543     STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7544     STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7545     STATS_OFFSET32(stat_XonPauseFramesReceived),
7546     STATS_OFFSET32(stat_XoffPauseFramesReceived),
7547     STATS_OFFSET32(stat_OutXonSent),
7548     STATS_OFFSET32(stat_OutXoffSent),
7549     STATS_OFFSET32(stat_MacControlFramesReceived),
7550     STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7551     STATS_OFFSET32(stat_IfInFTQDiscards),
7552     STATS_OFFSET32(stat_IfInMBUFDiscards),
7553     STATS_OFFSET32(stat_FwRxDrop),
7554 };
7555 
7556 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7557  * skipped because of errata.
7558  */
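/* These length arrays are parallel to the tables above as well: 8 marks
 * a 64-bit counter, 4 a 32-bit counter, and 0 a counter skipped on that
 * chip.
 */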
7559 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7560 	8,0,8,8,8,8,8,8,8,8,
7561 	4,0,4,4,4,4,4,4,4,4,
7562 	4,4,4,4,4,4,4,4,4,4,
7563 	4,4,4,4,4,4,4,4,4,4,
7564 	4,4,4,4,4,4,4,
7565 };
7566 
7567 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7568 	8,0,8,8,8,8,8,8,8,8,
7569 	4,4,4,4,4,4,4,4,4,4,
7570 	4,4,4,4,4,4,4,4,4,4,
7571 	4,4,4,4,4,4,4,4,4,4,
7572 	4,4,4,4,4,4,4,
7573 };
7574 
7575 #define BNX2_NUM_TESTS 6
7576 
7577 static struct {
7578 	char string[ETH_GSTRING_LEN];
7579 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7580 	{ "register_test (offline)" },
7581 	{ "memory_test (offline)" },
7582 	{ "loopback_test (offline)" },
7583 	{ "nvram_test (online)" },
7584 	{ "interrupt_test (online)" },
7585 	{ "link_test (online)" },
7586 };
7587 
7588 static int
7589 bnx2_get_sset_count(struct net_device *dev, int sset)
7590 {
7591 	switch (sset) {
7592 	case ETH_SS_TEST:
7593 		return BNX2_NUM_TESTS;
7594 	case ETH_SS_STATS:
7595 		return BNX2_NUM_STATS;
7596 	default:
7597 		return -EOPNOTSUPP;
7598 	}
7599 }
7600 
7601 static void
7602 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7603 {
7604 	struct bnx2 *bp = netdev_priv(dev);
7605 
7606 	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7607 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7608 		int i;
7609 
7610 		bnx2_netif_stop(bp, true);
7611 		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7612 		bnx2_free_skbs(bp);
7613 
7614 		if (bnx2_test_registers(bp) != 0) {
7615 			buf[0] = 1;
7616 			etest->flags |= ETH_TEST_FL_FAILED;
7617 		}
7618 		if (bnx2_test_memory(bp) != 0) {
7619 			buf[1] = 1;
7620 			etest->flags |= ETH_TEST_FL_FAILED;
7621 		}
7622 		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7623 			etest->flags |= ETH_TEST_FL_FAILED;
7624 
7625 		if (!netif_running(bp->dev))
7626 			bnx2_shutdown_chip(bp);
7627 		else {
7628 			bnx2_init_nic(bp, 1);
7629 			bnx2_netif_start(bp, true);
7630 		}
7631 
7632 		/* wait for link up */
7633 		for (i = 0; i < 7; i++) {
7634 			if (bp->link_up)
7635 				break;
7636 			msleep_interruptible(1000);
7637 		}
7638 	}
7639 
7640 	if (bnx2_test_nvram(bp) != 0) {
7641 		buf[3] = 1;
7642 		etest->flags |= ETH_TEST_FL_FAILED;
7643 	}
7644 	if (bnx2_test_intr(bp) != 0) {
7645 		buf[4] = 1;
7646 		etest->flags |= ETH_TEST_FL_FAILED;
7647 	}
7648 
7649 	if (bnx2_test_link(bp) != 0) {
7650 		buf[5] = 1;
7651 		etest->flags |= ETH_TEST_FL_FAILED;
7652 
7653 	}
7654 }
7655 
7656 static void
7657 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7658 {
7659 	switch (stringset) {
7660 	case ETH_SS_STATS:
7661 		memcpy(buf, bnx2_stats_str_arr,
7662 			sizeof(bnx2_stats_str_arr));
7663 		break;
7664 	case ETH_SS_TEST:
7665 		memcpy(buf, bnx2_tests_str_arr,
7666 			sizeof(bnx2_tests_str_arr));
7667 		break;
7668 	}
7669 }
7670 
7671 static void
7672 bnx2_get_ethtool_stats(struct net_device *dev,
7673 		struct ethtool_stats *stats, u64 *buf)
7674 {
7675 	struct bnx2 *bp = netdev_priv(dev);
7676 	int i;
7677 	u32 *hw_stats = (u32 *) bp->stats_blk;
7678 	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7679 	u8 *stats_len_arr = NULL;
7680 
7681 	if (!hw_stats) {
7682 		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7683 		return;
7684 	}
7685 
7686 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7687 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7688 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7689 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7690 		stats_len_arr = bnx2_5706_stats_len_arr;
7691 	else
7692 		stats_len_arr = bnx2_5708_stats_len_arr;
7693 
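	/* Each counter is the sum of the live hardware block and
	 * temp_stats_blk, which accumulates totals saved across chip
	 * resets.  64-bit counters occupy two consecutive 32-bit words,
	 * high word first.
	 */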
7694 	for (i = 0; i < BNX2_NUM_STATS; i++) {
7695 		unsigned long offset;
7696 
7697 		if (stats_len_arr[i] == 0) {
7698 			/* skip this counter */
7699 			buf[i] = 0;
7700 			continue;
7701 		}
7702 
7703 		offset = bnx2_stats_offset_arr[i];
7704 		if (stats_len_arr[i] == 4) {
7705 			/* 4-byte counter */
7706 			buf[i] = (u64) *(hw_stats + offset) +
7707 				 *(temp_stats + offset);
7708 			continue;
7709 		}
7710 		/* 8-byte counter */
7711 		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7712 			 *(hw_stats + offset + 1) +
7713 			 (((u64) *(temp_stats + offset)) << 32) +
7714 			 *(temp_stats + offset + 1);
7715 	}
7716 }
7717 
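/* ethtool identify (ethtool -p) callback.  Returning 1 from
 * ETHTOOL_ID_ACTIVE asks the core to alternate ETHTOOL_ID_ON and
 * ETHTOOL_ID_OFF once per second; the EMAC LED override bits force the
 * port LED on or off, and ETHTOOL_ID_INACTIVE restores the saved LED
 * configuration.
 */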
7718 static int
7719 bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7720 {
7721 	struct bnx2 *bp = netdev_priv(dev);
7722 
7723 	switch (state) {
7724 	case ETHTOOL_ID_ACTIVE:
7725 		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7726 		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7727 		return 1;	/* cycle on/off once per second */
7728 
7729 	case ETHTOOL_ID_ON:
7730 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7731 			BNX2_EMAC_LED_1000MB_OVERRIDE |
7732 			BNX2_EMAC_LED_100MB_OVERRIDE |
7733 			BNX2_EMAC_LED_10MB_OVERRIDE |
7734 			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7735 			BNX2_EMAC_LED_TRAFFIC);
7736 		break;
7737 
7738 	case ETHTOOL_ID_OFF:
7739 		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7740 		break;
7741 
7742 	case ETHTOOL_ID_INACTIVE:
7743 		BNX2_WR(bp, BNX2_EMAC_LED, 0);
7744 		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7745 		break;
7746 	}
7747 
7748 	return 0;
7749 }
7750 
7751 static int
7752 bnx2_set_features(struct net_device *dev, netdev_features_t features)
7753 {
7754 	struct bnx2 *bp = netdev_priv(dev);
7755 
7756 	/* TSO with VLAN tag won't work with current firmware */
7757 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7758 		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7759 	else
7760 		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7761 
7762 	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7763 	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7764 	    netif_running(dev)) {
7765 		bnx2_netif_stop(bp, false);
7766 		dev->features = features;
7767 		bnx2_set_rx_mode(dev);
7768 		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7769 		bnx2_netif_start(bp, false);
7770 		return 1;
7771 	}
7772 
7773 	return 0;
7774 }
7775 
7776 static void bnx2_get_channels(struct net_device *dev,
7777 			      struct ethtool_channels *channels)
7778 {
7779 	struct bnx2 *bp = netdev_priv(dev);
7780 	u32 max_rx_rings = 1;
7781 	u32 max_tx_rings = 1;
7782 
7783 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7784 		max_rx_rings = RX_MAX_RINGS;
7785 		max_tx_rings = TX_MAX_RINGS;
7786 	}
7787 
7788 	channels->max_rx = max_rx_rings;
7789 	channels->max_tx = max_tx_rings;
7790 	channels->max_other = 0;
7791 	channels->max_combined = 0;
7792 	channels->rx_count = bp->num_rx_rings;
7793 	channels->tx_count = bp->num_tx_rings;
7794 	channels->other_count = 0;
7795 	channels->combined_count = 0;
7796 }
7797 
7798 static int bnx2_set_channels(struct net_device *dev,
7799 			      struct ethtool_channels *channels)
7800 {
7801 	struct bnx2 *bp = netdev_priv(dev);
7802 	u32 max_rx_rings = 1;
7803 	u32 max_tx_rings = 1;
7804 	int rc = 0;
7805 
7806 	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7807 		max_rx_rings = RX_MAX_RINGS;
7808 		max_tx_rings = TX_MAX_RINGS;
7809 	}
7810 	if (channels->rx_count > max_rx_rings ||
7811 	    channels->tx_count > max_tx_rings)
7812 		return -EINVAL;
7813 
7814 	bp->num_req_rx_rings = channels->rx_count;
7815 	bp->num_req_tx_rings = channels->tx_count;
7816 
7817 	if (netif_running(dev))
7818 		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7819 					   bp->tx_ring_size, true);
7820 
7821 	return rc;
7822 }
7823 
7824 static const struct ethtool_ops bnx2_ethtool_ops = {
7825 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
7826 				     ETHTOOL_COALESCE_MAX_FRAMES |
7827 				     ETHTOOL_COALESCE_USECS_IRQ |
7828 				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
7829 				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
7830 	.get_drvinfo		= bnx2_get_drvinfo,
7831 	.get_regs_len		= bnx2_get_regs_len,
7832 	.get_regs		= bnx2_get_regs,
7833 	.get_wol		= bnx2_get_wol,
7834 	.set_wol		= bnx2_set_wol,
7835 	.nway_reset		= bnx2_nway_reset,
7836 	.get_link		= bnx2_get_link,
7837 	.get_eeprom_len		= bnx2_get_eeprom_len,
7838 	.get_eeprom		= bnx2_get_eeprom,
7839 	.set_eeprom		= bnx2_set_eeprom,
7840 	.get_coalesce		= bnx2_get_coalesce,
7841 	.set_coalesce		= bnx2_set_coalesce,
7842 	.get_ringparam		= bnx2_get_ringparam,
7843 	.set_ringparam		= bnx2_set_ringparam,
7844 	.get_pauseparam		= bnx2_get_pauseparam,
7845 	.set_pauseparam		= bnx2_set_pauseparam,
7846 	.self_test		= bnx2_self_test,
7847 	.get_strings		= bnx2_get_strings,
7848 	.set_phys_id		= bnx2_set_phys_id,
7849 	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7850 	.get_sset_count		= bnx2_get_sset_count,
7851 	.get_channels		= bnx2_get_channels,
7852 	.set_channels		= bnx2_set_channels,
7853 	.get_link_ksettings	= bnx2_get_link_ksettings,
7854 	.set_link_ksettings	= bnx2_set_link_ksettings,
7855 };
7856 
7857 /* Called with rtnl_lock */
7858 static int
7859 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7860 {
7861 	struct mii_ioctl_data *data = if_mii(ifr);
7862 	struct bnx2 *bp = netdev_priv(dev);
7863 	int err;
7864 
7865 	switch(cmd) {
7866 	case SIOCGMIIPHY:
7867 		data->phy_id = bp->phy_addr;
7868 
7869 		fallthrough;
7870 	case SIOCGMIIREG: {
7871 		u32 mii_regval;
7872 
7873 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7874 			return -EOPNOTSUPP;
7875 
7876 		if (!netif_running(dev))
7877 			return -EAGAIN;
7878 
7879 		spin_lock_bh(&bp->phy_lock);
7880 		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7881 		spin_unlock_bh(&bp->phy_lock);
7882 
7883 		data->val_out = mii_regval;
7884 
7885 		return err;
7886 	}
7887 
7888 	case SIOCSMIIREG:
7889 		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7890 			return -EOPNOTSUPP;
7891 
7892 		if (!netif_running(dev))
7893 			return -EAGAIN;
7894 
7895 		spin_lock_bh(&bp->phy_lock);
7896 		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7897 		spin_unlock_bh(&bp->phy_lock);
7898 
7899 		return err;
7900 
7901 	default:
7902 		/* do nothing */
7903 		break;
7904 	}
7905 	return -EOPNOTSUPP;
7906 }
7907 
7908 /* Called with rtnl_lock */
7909 static int
7910 bnx2_change_mac_addr(struct net_device *dev, void *p)
7911 {
7912 	struct sockaddr *addr = p;
7913 	struct bnx2 *bp = netdev_priv(dev);
7914 
7915 	if (!is_valid_ether_addr(addr->sa_data))
7916 		return -EADDRNOTAVAIL;
7917 
7918 	eth_hw_addr_set(dev, addr->sa_data);
7919 	if (netif_running(dev))
7920 		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7921 
7922 	return 0;
7923 }
7924 
7925 /* Called with rtnl_lock */
7926 static int
7927 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7928 {
7929 	struct bnx2 *bp = netdev_priv(dev);
7930 
7931 	dev->mtu = new_mtu;
7932 	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7933 				     false);
7934 }
7935 
7936 #ifdef CONFIG_NET_POLL_CONTROLLER
7937 static void
7938 poll_bnx2(struct net_device *dev)
7939 {
7940 	struct bnx2 *bp = netdev_priv(dev);
7941 	int i;
7942 
7943 	for (i = 0; i < bp->irq_nvecs; i++) {
7944 		struct bnx2_irq *irq = &bp->irq_tbl[i];
7945 
7946 		disable_irq(irq->vector);
7947 		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7948 		enable_irq(irq->vector);
7949 	}
7950 }
7951 #endif
7952 
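/* The 5709 is a dual-media part.  Bond ID pins identify copper-only (C)
 * and SerDes-only (S) packages; otherwise the media for this PCI
 * function is decoded from the PHY strap bits, taken from the strap
 * override when one is set, or from the hardware straps otherwise.
 */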
7953 static void
7954 bnx2_get_5709_media(struct bnx2 *bp)
7955 {
7956 	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7957 	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7958 	u32 strap;
7959 
7960 	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7961 		return;
7962 	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7963 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7964 		return;
7965 	}
7966 
7967 	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7968 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7969 	else
7970 		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7971 
7972 	if (bp->func == 0) {
7973 		switch (strap) {
7974 		case 0x4:
7975 		case 0x5:
7976 		case 0x6:
7977 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7978 			return;
7979 		}
7980 	} else {
7981 		switch (strap) {
7982 		case 0x1:
7983 		case 0x2:
7984 		case 0x4:
7985 			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7986 			return;
7987 		}
7988 	}
7989 }
7990 
7991 static void
7992 bnx2_get_pci_speed(struct bnx2 *bp)
7993 {
7994 	u32 reg;
7995 
7996 	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7997 	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7998 		u32 clkreg;
7999 
8000 		bp->flags |= BNX2_FLAG_PCIX;
8001 
8002 		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
8003 
8004 		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
8005 		switch (clkreg) {
8006 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
8007 			bp->bus_speed_mhz = 133;
8008 			break;
8009 
8010 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
8011 			bp->bus_speed_mhz = 100;
8012 			break;
8013 
8014 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
8015 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
8016 			bp->bus_speed_mhz = 66;
8017 			break;
8018 
8019 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
8020 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
8021 			bp->bus_speed_mhz = 50;
8022 			break;
8023 
8024 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
8025 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
8026 		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
8027 			bp->bus_speed_mhz = 33;
8028 			break;
8029 		}
8030 	}
8031 	else {
8032 		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
8033 			bp->bus_speed_mhz = 66;
8034 		else
8035 			bp->bus_speed_mhz = 33;
8036 	}
8037 
8038 	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
8039 		bp->flags |= BNX2_FLAG_PCI_32BIT;
8040 
8041 }
8042 
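/* Try to pull an OEM firmware version string from the VPD area in
 * NVRAM.  NVRAM is stored as big-endian words, hence the swab32s()
 * pass, and the vendor version keyword is only trusted when the
 * manufacturer ID reads "1028" (Dell).
 */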
8043 static void
8044 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8045 {
8046 	unsigned int len;
8047 	int rc, i, j;
8048 	u8 *data;
8049 
8050 #define BNX2_VPD_NVRAM_OFFSET	0x300
8051 #define BNX2_VPD_LEN		128
8052 #define BNX2_MAX_VER_SLEN	30
8053 
8054 	data = kmalloc(BNX2_VPD_LEN, GFP_KERNEL);
8055 	if (!data)
8056 		return;
8057 
8058 	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data, BNX2_VPD_LEN);
8059 	if (rc)
8060 		goto vpd_done;
8061 
8062 	for (i = 0; i < BNX2_VPD_LEN; i += 4)
8063 		swab32s((u32 *)&data[i]);
8064 
8065 	j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
8066 					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
8067 	if (j < 0)
8068 		goto vpd_done;
8069 
8070 	if (len != 4 || memcmp(&data[j], "1028", 4))
8071 		goto vpd_done;
8072 
8073 	j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
8074 					 PCI_VPD_RO_KEYWORD_VENDOR0,
8075 					 &len);
8076 	if (j < 0)
8077 		goto vpd_done;
8078 
8079 	if (len > BNX2_MAX_VER_SLEN)
8080 		goto vpd_done;
8081 
8082 	memcpy(bp->fw_version, &data[j], len);
8083 	bp->fw_version[len] = ' ';
8084 
8085 vpd_done:
8086 	kfree(data);
8087 }
8088 
8089 static int
8090 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8091 {
8092 	struct bnx2 *bp;
8093 	int rc, i, j;
8094 	u32 reg;
8095 	u64 dma_mask, persist_dma_mask;
8096 	int err;
8097 
8098 	SET_NETDEV_DEV(dev, &pdev->dev);
8099 	bp = netdev_priv(dev);
8100 
8101 	bp->flags = 0;
8102 	bp->phy_flags = 0;
8103 
8104 	bp->temp_stats_blk =
8105 		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8106 
8107 	if (!bp->temp_stats_blk) {
8108 		rc = -ENOMEM;
8109 		goto err_out;
8110 	}
8111 
8112 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
8113 	rc = pci_enable_device(pdev);
8114 	if (rc) {
8115 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8116 		goto err_out;
8117 	}
8118 
8119 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8120 		dev_err(&pdev->dev,
8121 			"Cannot find PCI device base address, aborting\n");
8122 		rc = -ENODEV;
8123 		goto err_out_disable;
8124 	}
8125 
8126 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8127 	if (rc) {
8128 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8129 		goto err_out_disable;
8130 	}
8131 
8132 	pci_set_master(pdev);
8133 
8134 	bp->pm_cap = pdev->pm_cap;
8135 	if (bp->pm_cap == 0) {
8136 		dev_err(&pdev->dev,
8137 			"Cannot find power management capability, aborting\n");
8138 		rc = -EIO;
8139 		goto err_out_release;
8140 	}
8141 
8142 	bp->dev = dev;
8143 	bp->pdev = pdev;
8144 
8145 	spin_lock_init(&bp->phy_lock);
8146 	spin_lock_init(&bp->indirect_lock);
8147 #ifdef BCM_CNIC
8148 	mutex_init(&bp->cnic_lock);
8149 #endif
8150 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8151 
8152 	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8153 							 TX_MAX_TSS_RINGS + 1));
8154 	if (!bp->regview) {
8155 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8156 		rc = -ENOMEM;
8157 		goto err_out_release;
8158 	}
8159 
8160 	/* Configure byte swap and enable writes to the reg_window registers.
8161 	 * Rely on the CPU to do target byte swapping on big endian systems;
8162 	 * the chip's target access swapping will not swap all accesses.
8163 	 */
8164 	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8165 		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8166 		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8167 
8168 	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8169 
8170 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8171 		if (!pci_is_pcie(pdev)) {
8172 			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8173 			rc = -EIO;
8174 			goto err_out_unmap;
8175 		}
8176 		bp->flags |= BNX2_FLAG_PCIE;
8177 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8178 			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8179 
8180 		/* AER (Advanced Error Reporting) hooks */
8181 		err = pci_enable_pcie_error_reporting(pdev);
8182 		if (!err)
8183 			bp->flags |= BNX2_FLAG_AER_ENABLED;
8184 
8185 	} else {
8186 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8187 		if (bp->pcix_cap == 0) {
8188 			dev_err(&pdev->dev,
8189 				"Cannot find PCIX capability, aborting\n");
8190 			rc = -EIO;
8191 			goto err_out_unmap;
8192 		}
8193 		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8194 	}
8195 
8196 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8197 	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8198 		if (pdev->msix_cap)
8199 			bp->flags |= BNX2_FLAG_MSIX_CAP;
8200 	}
8201 
8202 	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8203 	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8204 		if (pdev->msi_cap)
8205 			bp->flags |= BNX2_FLAG_MSI_CAP;
8206 	}
8207 
8208 	/* 5708 cannot support DMA addresses > 40-bit.  */
8209 	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8210 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8211 	else
8212 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8213 
8214 	/* Configure DMA attributes. */
8215 	if (dma_set_mask(&pdev->dev, dma_mask) == 0) {
8216 		dev->features |= NETIF_F_HIGHDMA;
8217 		rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
8218 		if (rc) {
8219 			dev_err(&pdev->dev,
8220 				"dma_set_coherent_mask failed, aborting\n");
8221 			goto err_out_unmap;
8222 		}
8223 	} else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
8224 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8225 		goto err_out_unmap;
8226 	}
8227 
8228 	if (!(bp->flags & BNX2_FLAG_PCIE))
8229 		bnx2_get_pci_speed(bp);
8230 
8231 	/* 5706A0 may falsely detect SERR and PERR. */
8232 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8233 		reg = BNX2_RD(bp, PCI_COMMAND);
8234 		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8235 		BNX2_WR(bp, PCI_COMMAND, reg);
8236 	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8237 		!(bp->flags & BNX2_FLAG_PCIX)) {
8238 		dev_err(&pdev->dev,
8239 			"5706 A1 can only be used in a PCIX bus, aborting\n");
8240 		rc = -EPERM;
8241 		goto err_out_unmap;
8242 	}
8243 
8244 	bnx2_init_nvram(bp);
8245 
8246 	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8247 
8248 	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8249 		bp->func = 1;
8250 
8251 	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8252 	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8253 		u32 off = bp->func << 2;
8254 
8255 		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8256 	} else
8257 		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8258 
8259 	/* Get the permanent MAC address.  First we need to make sure the
8260 	 * firmware is actually running.
8261 	 */
8262 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8263 
8264 	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8265 	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8266 		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8267 		rc = -ENODEV;
8268 		goto err_out_unmap;
8269 	}
8270 
8271 	bnx2_read_vpd_fw_ver(bp);
8272 
8273 	j = strlen(bp->fw_version);
8274 	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8275 	for (i = 0; i < 3 && j < 24; i++) {
8276 		u8 num, k, skip0;
8277 
8278 		if (i == 0) {
8279 			bp->fw_version[j++] = 'b';
8280 			bp->fw_version[j++] = 'c';
8281 			bp->fw_version[j++] = ' ';
8282 		}
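		/* Emit one version byte in decimal, suppressing leading
		 * zeros (k steps through 100, 10, 1).
		 */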
8283 		num = (u8) (reg >> (24 - (i * 8)));
8284 		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8285 			if (num >= k || !skip0 || k == 1) {
8286 				bp->fw_version[j++] = (num / k) + '0';
8287 				skip0 = 0;
8288 			}
8289 		}
8290 		if (i != 2)
8291 			bp->fw_version[j++] = '.';
8292 	}
8293 	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8294 	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8295 		bp->wol = 1;
8296 
8297 	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8298 		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8299 
8300 		for (i = 0; i < 30; i++) {
8301 			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8302 			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8303 				break;
8304 			msleep(10);
8305 		}
8306 	}
8307 	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8308 	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8309 	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8310 	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8311 		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8312 
8313 		if (j < 32)
8314 			bp->fw_version[j++] = ' ';
8315 		for (i = 0; i < 3 && j < 28; i++) {
8316 			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8317 			reg = be32_to_cpu(reg);
8318 			memcpy(&bp->fw_version[j], &reg, 4);
8319 			j += 4;
8320 		}
8321 	}
8322 
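	/* The permanent MAC address sits big-endian in two shared memory
	 * words: the top two bytes in MAC_UPPER, the remaining four in
	 * MAC_LOWER.
	 */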
8323 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8324 	bp->mac_addr[0] = (u8) (reg >> 8);
8325 	bp->mac_addr[1] = (u8) reg;
8326 
8327 	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8328 	bp->mac_addr[2] = (u8) (reg >> 24);
8329 	bp->mac_addr[3] = (u8) (reg >> 16);
8330 	bp->mac_addr[4] = (u8) (reg >> 8);
8331 	bp->mac_addr[5] = (u8) reg;
8332 
8333 	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8334 	bnx2_set_rx_ring_size(bp, 255);
8335 
8336 	bp->tx_quick_cons_trip_int = 2;
8337 	bp->tx_quick_cons_trip = 20;
8338 	bp->tx_ticks_int = 18;
8339 	bp->tx_ticks = 80;
8340 
8341 	bp->rx_quick_cons_trip_int = 2;
8342 	bp->rx_quick_cons_trip = 12;
8343 	bp->rx_ticks_int = 18;
8344 	bp->rx_ticks = 18;
8345 
8346 	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8347 
8348 	bp->current_interval = BNX2_TIMER_INTERVAL;
8349 
8350 	bp->phy_addr = 1;
8351 
8352 	/* allocate stats_blk */
8353 	rc = bnx2_alloc_stats_blk(dev);
8354 	if (rc)
8355 		goto err_out_unmap;
8356 
8357 	/* Determine the media type; WOL is disabled below for SERDES chips
	 * that cannot keep the link up on VAUX.
	 */
8358 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8359 		bnx2_get_5709_media(bp);
8360 	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8361 		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8362 
8363 	bp->phy_port = PORT_TP;
8364 	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8365 		bp->phy_port = PORT_FIBRE;
8366 		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8367 		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8368 			bp->flags |= BNX2_FLAG_NO_WOL;
8369 			bp->wol = 0;
8370 		}
8371 		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8372 			/* Don't do parallel detect on this board because of
8373 			 * some board problems.  The link will not go down
8374 			 * if we do parallel detect.
8375 			 */
8376 			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8377 			    pdev->subsystem_device == 0x310c)
8378 				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8379 		} else {
8380 			bp->phy_addr = 2;
8381 			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8382 				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8383 		}
8384 	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8385 		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8386 		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8387 	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8388 		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8389 		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8390 		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8391 
8392 	bnx2_init_fw_cap(bp);
8393 
8394 	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8395 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8396 	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8397 	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8398 		bp->flags |= BNX2_FLAG_NO_WOL;
8399 		bp->wol = 0;
8400 	}
8401 
8402 	if (bp->flags & BNX2_FLAG_NO_WOL)
8403 		device_set_wakeup_capable(&bp->pdev->dev, false);
8404 	else
8405 		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8406 
8407 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8408 		bp->tx_quick_cons_trip_int =
8409 			bp->tx_quick_cons_trip;
8410 		bp->tx_ticks_int = bp->tx_ticks;
8411 		bp->rx_quick_cons_trip_int =
8412 			bp->rx_quick_cons_trip;
8413 		bp->rx_ticks_int = bp->rx_ticks;
8414 		bp->comp_prod_trip_int = bp->comp_prod_trip;
8415 		bp->com_ticks_int = bp->com_ticks;
8416 		bp->cmd_ticks_int = bp->cmd_ticks;
8417 	}
8418 
8419 	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8420 	 *
8421 	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8422 	 * with byte enables disabled on the unused 32-bit word.  This is legal
8423 	 * but causes problems on the AMD 8132 which will eventually stop
8424 	 * responding after a while.
8425 	 *
8426 	 * AMD believes this incompatibility is unique to the 5706, and
8427 	 * prefers to locally disable MSI rather than globally disabling it.
8428 	 */
8429 	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8430 		struct pci_dev *amd_8132 = NULL;
8431 
8432 		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8433 						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8434 						  amd_8132))) {
8435 
8436 			if (amd_8132->revision >= 0x10 &&
8437 			    amd_8132->revision <= 0x13) {
8438 				disable_msi = 1;
8439 				pci_dev_put(amd_8132);
8440 				break;
8441 			}
8442 		}
8443 	}
8444 
8445 	bnx2_set_default_link(bp);
8446 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8447 
8448 	timer_setup(&bp->timer, bnx2_timer, 0);
8449 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8450 
8451 #ifdef BCM_CNIC
8452 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8453 		bp->cnic_eth_dev.max_iscsi_conn =
8454 			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8455 			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8456 	bp->cnic_probe = bnx2_cnic_probe;
8457 #endif
8458 	pci_save_state(pdev);
8459 
8460 	return 0;
8461 
8462 err_out_unmap:
8463 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8464 		pci_disable_pcie_error_reporting(pdev);
8465 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8466 	}
8467 
8468 	pci_iounmap(pdev, bp->regview);
8469 	bp->regview = NULL;
8470 
8471 err_out_release:
8472 	pci_release_regions(pdev);
8473 
8474 err_out_disable:
8475 	pci_disable_device(pdev);
8476 
8477 err_out:
8478 	kfree(bp->temp_stats_blk);
8479 
8480 	return rc;
8481 }
8482 
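/* Format a human-readable bus description into str, e.g. "PCI Express"
 * or "PCI-X 64-bit 133MHz".  The caller supplies the buffer (str[40] in
 * bnx2_init_one()), which these strings comfortably fit.
 */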
8483 static char *
8484 bnx2_bus_string(struct bnx2 *bp, char *str)
8485 {
8486 	char *s = str;
8487 
8488 	if (bp->flags & BNX2_FLAG_PCIE) {
8489 		s += sprintf(s, "PCI Express");
8490 	} else {
8491 		s += sprintf(s, "PCI");
8492 		if (bp->flags & BNX2_FLAG_PCIX)
8493 			s += sprintf(s, "-X");
8494 		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8495 			s += sprintf(s, " 32-bit");
8496 		else
8497 			s += sprintf(s, " 64-bit");
8498 		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8499 	}
8500 	return str;
8501 }
8502 
8503 static void
8504 bnx2_del_napi(struct bnx2 *bp)
8505 {
8506 	int i;
8507 
8508 	for (i = 0; i < bp->irq_nvecs; i++)
8509 		netif_napi_del(&bp->bnx2_napi[i].napi);
8510 }
8511 
8512 static void
8513 bnx2_init_napi(struct bnx2 *bp)
8514 {
8515 	int i;
8516 
8517 	for (i = 0; i < bp->irq_nvecs; i++) {
8518 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8519 		int (*poll)(struct napi_struct *, int);
8520 
8521 		if (i == 0)
8522 			poll = bnx2_poll;
8523 		else
8524 			poll = bnx2_poll_msix;
8525 
8526 		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll);
8527 		bnapi->bp = bp;
8528 	}
8529 }
8530 
8531 static const struct net_device_ops bnx2_netdev_ops = {
8532 	.ndo_open		= bnx2_open,
8533 	.ndo_start_xmit		= bnx2_start_xmit,
8534 	.ndo_stop		= bnx2_close,
8535 	.ndo_get_stats64	= bnx2_get_stats64,
8536 	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8537 	.ndo_eth_ioctl		= bnx2_ioctl,
8538 	.ndo_validate_addr	= eth_validate_addr,
8539 	.ndo_set_mac_address	= bnx2_change_mac_addr,
8540 	.ndo_change_mtu		= bnx2_change_mtu,
8541 	.ndo_set_features	= bnx2_set_features,
8542 	.ndo_tx_timeout		= bnx2_tx_timeout,
8543 #ifdef CONFIG_NET_POLL_CONTROLLER
8544 	.ndo_poll_controller	= poll_bnx2,
8545 #endif
8546 };
8547 
8548 static int
8549 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8550 {
8551 	struct net_device *dev;
8552 	struct bnx2 *bp;
8553 	int rc;
8554 	char str[40];
8555 
8556 	/* dev is zeroed by alloc_etherdev_mq() */
8557 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8558 	if (!dev)
8559 		return -ENOMEM;
8560 
8561 	rc = bnx2_init_board(pdev, dev);
8562 	if (rc < 0)
8563 		goto err_free;
8564 
8565 	dev->netdev_ops = &bnx2_netdev_ops;
8566 	dev->watchdog_timeo = TX_TIMEOUT;
8567 	dev->ethtool_ops = &bnx2_ethtool_ops;
8568 
8569 	bp = netdev_priv(dev);
8570 
8571 	pci_set_drvdata(pdev, dev);
8572 
8573 	/*
8574 	 * In-flight DMA from the first kernel could continue in the kdump
8575 	 * kernel.  A new IO page table has been created before bnx2 resets
8576 	 * the chip at open time, so wait for the in-flight DMA to complete
8577 	 * to keep it from looking up the newly created IO page table.
8578 	 */
8579 	if (is_kdump_kernel())
8580 		bnx2_wait_dma_complete(bp);
8581 
8582 	eth_hw_addr_set(dev, bp->mac_addr);
8583 
8584 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8585 		NETIF_F_TSO | NETIF_F_TSO_ECN |
8586 		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8587 
8588 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8589 		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8590 
8591 	dev->vlan_features = dev->hw_features;
8592 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8593 	dev->features |= dev->hw_features;
8594 	dev->priv_flags |= IFF_UNICAST_FLT;
8595 	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
8596 	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;
8597 
8598 	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
8599 		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
8600 
8601 	if ((rc = register_netdev(dev))) {
8602 		dev_err(&pdev->dev, "Cannot register net device\n");
8603 		goto error;
8604 	}
8605 
8606 	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8607 		    "node addr %pM\n", board_info[ent->driver_data].name,
8608 		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8609 		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8610 		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8611 		    pdev->irq, dev->dev_addr);
8612 
8613 	return 0;
8614 
8615 error:
8616 	pci_iounmap(pdev, bp->regview);
8617 	pci_release_regions(pdev);
8618 	pci_disable_device(pdev);
8619 err_free:
8620 	bnx2_free_stats_blk(dev);
8621 	free_netdev(dev);
8622 	return rc;
8623 }
8624 
8625 static void
8626 bnx2_remove_one(struct pci_dev *pdev)
8627 {
8628 	struct net_device *dev = pci_get_drvdata(pdev);
8629 	struct bnx2 *bp = netdev_priv(dev);
8630 
8631 	unregister_netdev(dev);
8632 
8633 	del_timer_sync(&bp->timer);
8634 	cancel_work_sync(&bp->reset_task);
8635 
8636 	pci_iounmap(bp->pdev, bp->regview);
8637 
8638 	bnx2_free_stats_blk(dev);
8639 	kfree(bp->temp_stats_blk);
8640 
8641 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8642 		pci_disable_pcie_error_reporting(pdev);
8643 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8644 	}
8645 
8646 	bnx2_release_firmware(bp);
8647 
8648 	free_netdev(dev);
8649 
8650 	pci_release_regions(pdev);
8651 	pci_disable_device(pdev);
8652 }
8653 
8654 #ifdef CONFIG_PM_SLEEP
8655 static int
8656 bnx2_suspend(struct device *device)
8657 {
8658 	struct net_device *dev = dev_get_drvdata(device);
8659 	struct bnx2 *bp = netdev_priv(dev);
8660 
8661 	if (netif_running(dev)) {
8662 		cancel_work_sync(&bp->reset_task);
8663 		bnx2_netif_stop(bp, true);
8664 		netif_device_detach(dev);
8665 		del_timer_sync(&bp->timer);
8666 		bnx2_shutdown_chip(bp);
8667 		__bnx2_free_irq(bp);
8668 		bnx2_free_skbs(bp);
8669 	}
8670 	bnx2_setup_wol(bp);
8671 	return 0;
8672 }
8673 
8674 static int
8675 bnx2_resume(struct device *device)
8676 {
8677 	struct net_device *dev = dev_get_drvdata(device);
8678 	struct bnx2 *bp = netdev_priv(dev);
8679 
8680 	if (!netif_running(dev))
8681 		return 0;
8682 
8683 	bnx2_set_power_state(bp, PCI_D0);
8684 	netif_device_attach(dev);
8685 	bnx2_request_irq(bp);
8686 	bnx2_init_nic(bp, 1);
8687 	bnx2_netif_start(bp, true);
8688 	return 0;
8689 }
8690 
8691 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8692 #define BNX2_PM_OPS (&bnx2_pm_ops)
8693 
8694 #else
8695 
8696 #define BNX2_PM_OPS NULL
8697 
8698 #endif /* CONFIG_PM_SLEEP */
8699 /**
8700  * bnx2_io_error_detected - called when PCI error is detected
8701  * @pdev: Pointer to PCI device
8702  * @state: The current pci connection state
8703  *
8704  * This function is called after a PCI bus error affecting
8705  * this device has been detected.
8706  */
8707 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8708 					       pci_channel_state_t state)
8709 {
8710 	struct net_device *dev = pci_get_drvdata(pdev);
8711 	struct bnx2 *bp = netdev_priv(dev);
8712 
8713 	rtnl_lock();
8714 	netif_device_detach(dev);
8715 
8716 	if (state == pci_channel_io_perm_failure) {
8717 		rtnl_unlock();
8718 		return PCI_ERS_RESULT_DISCONNECT;
8719 	}
8720 
8721 	if (netif_running(dev)) {
8722 		bnx2_netif_stop(bp, true);
8723 		del_timer_sync(&bp->timer);
8724 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8725 	}
8726 
8727 	pci_disable_device(pdev);
8728 	rtnl_unlock();
8729 
8730 	/* Request a slot reset. */
8731 	return PCI_ERS_RESULT_NEED_RESET;
8732 }
8733 
8734 /**
8735  * bnx2_io_slot_reset - called after the pci bus has been reset.
8736  * @pdev: Pointer to PCI device
8737  *
8738  * Restart the card from scratch, as if from a cold-boot.
8739  */
8740 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8741 {
8742 	struct net_device *dev = pci_get_drvdata(pdev);
8743 	struct bnx2 *bp = netdev_priv(dev);
8744 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8745 	int err = 0;
8746 
8747 	rtnl_lock();
8748 	if (pci_enable_device(pdev)) {
8749 		dev_err(&pdev->dev,
8750 			"Cannot re-enable PCI device after reset\n");
8751 	} else {
8752 		pci_set_master(pdev);
8753 		pci_restore_state(pdev);
8754 		pci_save_state(pdev);
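		/* Re-save the just-restored config space so that a later
		 * pci_restore_state() (e.g. after another reset) still has
		 * valid state to load.
		 */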
8755 
8756 		if (netif_running(dev))
8757 			err = bnx2_init_nic(bp, 1);
8758 
8759 		if (!err)
8760 			result = PCI_ERS_RESULT_RECOVERED;
8761 	}
8762 
8763 	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8764 		bnx2_napi_enable(bp);
8765 		dev_close(dev);
8766 	}
8767 	rtnl_unlock();
8768 
8769 	return result;
8773 }
8774 
8775 /**
8776  * bnx2_io_resume - called when traffic can start flowing again.
8777  * @pdev: Pointer to PCI device
8778  *
8779  * This callback is called when the error recovery driver tells us that
8780  * it's OK to resume normal operation.
8781  */
8782 static void bnx2_io_resume(struct pci_dev *pdev)
8783 {
8784 	struct net_device *dev = pci_get_drvdata(pdev);
8785 	struct bnx2 *bp = netdev_priv(dev);
8786 
8787 	rtnl_lock();
8788 	if (netif_running(dev))
8789 		bnx2_netif_start(bp, true);
8790 
8791 	netif_device_attach(dev);
8792 	rtnl_unlock();
8793 }
8794 
8795 static void bnx2_shutdown(struct pci_dev *pdev)
8796 {
8797 	struct net_device *dev = pci_get_drvdata(pdev);
8798 	struct bnx2 *bp;
8799 
8800 	if (!dev)
8801 		return;
8802 
8803 	bp = netdev_priv(dev);
8804 	if (!bp)
8805 		return;
8806 
8807 	rtnl_lock();
8808 	if (netif_running(dev))
8809 		dev_close(bp->dev);
8810 
8811 	if (system_state == SYSTEM_POWER_OFF)
8812 		bnx2_set_power_state(bp, PCI_D3hot);
8813 
8814 	rtnl_unlock();
8815 }
8816 
8817 static const struct pci_error_handlers bnx2_err_handler = {
8818 	.error_detected	= bnx2_io_error_detected,
8819 	.slot_reset	= bnx2_io_slot_reset,
8820 	.resume		= bnx2_io_resume,
8821 };
8822 
8823 static struct pci_driver bnx2_pci_driver = {
8824 	.name		= DRV_MODULE_NAME,
8825 	.id_table	= bnx2_pci_tbl,
8826 	.probe		= bnx2_init_one,
8827 	.remove		= bnx2_remove_one,
8828 	.driver.pm	= BNX2_PM_OPS,
8829 	.err_handler	= &bnx2_err_handler,
8830 	.shutdown	= bnx2_shutdown,
8831 };
8832 
8833 module_pci_driver(bnx2_pci_driver);
8834