1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic qlge NIC HBA Driver
4  * Copyright (c)  2003-2008 QLogic Corporation
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/if_vlan.h>
37 #include <linux/skbuff.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <linux/prefetch.h>
42 #include <net/ip6_checksum.h>
43 
44 #include "qlge.h"
45 #include "qlge_devlink.h"
46 
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49 
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54 
55 static const u32 default_msg =
56 	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 	NETIF_MSG_IFDOWN |
58 	NETIF_MSG_IFUP |
59 	NETIF_MSG_RX_ERR |
60 	NETIF_MSG_TX_ERR |
61 	NETIF_MSG_HW | NETIF_MSG_WOL | 0;
62 
63 static int debug = -1;	/* defaults above */
64 module_param(debug, int, 0664);
65 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
66 
67 #define MSIX_IRQ 0
68 #define MSI_IRQ 1
69 #define LEG_IRQ 2
70 static int qlge_irq_type = MSIX_IRQ;
71 module_param(qlge_irq_type, int, 0664);
72 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
73 
74 static int qlge_mpi_coredump;
75 module_param(qlge_mpi_coredump, int, 0);
76 MODULE_PARM_DESC(qlge_mpi_coredump,
		 "Option to enable MPI firmware dump. Default is OFF - Do not allocate memory.");
78 
79 static int qlge_force_coredump;
80 module_param(qlge_force_coredump, int, 0);
81 MODULE_PARM_DESC(qlge_force_coredump,
82 		 "Option to allow force of firmware core dump. Default is OFF - Do not allow.");
83 
84 static const struct pci_device_id qlge_pci_tbl[] = {
85 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
86 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
87 	/* required last entry */
88 	{0,}
89 };
90 
91 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
92 
93 static int qlge_wol(struct qlge_adapter *);
94 static void qlge_set_multicast_list(struct net_device *);
95 static int qlge_adapter_down(struct qlge_adapter *);
96 static int qlge_adapter_up(struct qlge_adapter *);
97 
/* This hardware semaphore provides exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCoE firmware and the FC driver.
 */
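/* Returns 0 if the semaphore was acquired (the requested SEM bits read
 * back as set), non-zero if another function already owns it.
 */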
static int qlge_sem_trylock(struct qlge_adapter *qdev, u32 sem_mask)
103 {
104 	u32 sem_bits = 0;
105 
106 	switch (sem_mask) {
107 	case SEM_XGMAC0_MASK:
108 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
109 		break;
110 	case SEM_XGMAC1_MASK:
111 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
112 		break;
113 	case SEM_ICB_MASK:
114 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
115 		break;
116 	case SEM_MAC_ADDR_MASK:
117 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
118 		break;
119 	case SEM_FLASH_MASK:
120 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
121 		break;
122 	case SEM_PROBE_MASK:
123 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
124 		break;
125 	case SEM_RT_IDX_MASK:
126 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
127 		break;
128 	case SEM_PROC_REG_MASK:
129 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
130 		break;
131 	default:
		netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
133 		return -EINVAL;
134 	}
135 
136 	qlge_write32(qdev, SEM, sem_bits | sem_mask);
137 	return !(qlge_read32(qdev, SEM) & sem_bits);
138 }
139 
int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask)
141 {
142 	unsigned int wait_count = 30;
143 
144 	do {
145 		if (!qlge_sem_trylock(qdev, sem_mask))
146 			return 0;
147 		udelay(100);
148 	} while (--wait_count);
149 	return -ETIMEDOUT;
150 }
151 
void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask)
153 {
154 	qlge_write32(qdev, SEM, sem_mask);
155 	qlge_read32(qdev, SEM);	/* flush */
156 }
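/* Typical usage of the semaphore API, as done by e.g.
 * qlge_set_mac_addr() later in this file:
 *
 *	status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;
 *	...access the shared resource...
 *	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */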
157 
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialization
 * process, but is also used from process context, e.g. by
 * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
 */
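/* The wait is bounded: at most UDELAY_COUNT polls spaced UDELAY_DELAY
 * microseconds apart before giving up with -ETIMEDOUT.
 */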
int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
164 {
165 	u32 temp;
166 	int count;
167 
168 	for (count = 0; count < UDELAY_COUNT; count++) {
169 		temp = qlge_read32(qdev, reg);
170 
171 		/* check for errors */
172 		if (temp & err_bit) {
173 			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
175 				    reg, temp);
176 			return -EIO;
177 		} else if (temp & bit) {
178 			return 0;
179 		}
180 		udelay(UDELAY_DELAY);
181 	}
182 	netif_alert(qdev, probe, qdev->ndev,
183 		    "Timed out waiting for reg %x to come ready.\n", reg);
184 	return -ETIMEDOUT;
185 }
186 
187 /* The CFG register is used to download TX and RX control blocks
188  * to the chip. This function waits for an operation to complete.
189  */
static int qlge_wait_cfg(struct qlge_adapter *qdev, u32 bit)
191 {
192 	int count;
193 	u32 temp;
194 
195 	for (count = 0; count < UDELAY_COUNT; count++) {
196 		temp = qlge_read32(qdev, CFG);
197 		if (temp & CFG_LE)
198 			return -EIO;
199 		if (!(temp & bit))
200 			return 0;
201 		udelay(UDELAY_DELAY);
202 	}
203 	return -ETIMEDOUT;
204 }
205 
206 /* Used to issue init control blocks to hw. Maps control block,
207  * sets address, triggers download, waits for completion.
208  */
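/* Note on DMA direction below: load operations (CFG_LRQ, CFG_LR,
 * CFG_LCQ) transfer the control block into the chip, so the buffer is
 * mapped DMA_TO_DEVICE; everything else is a dump from the chip and is
 * mapped DMA_FROM_DEVICE.
 */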
int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit,
210 		   u16 q_id)
211 {
212 	u64 map;
213 	int status = 0;
214 	int direction;
215 	u32 mask;
216 	u32 value;
217 
218 	if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
219 		direction = DMA_TO_DEVICE;
220 	else
221 		direction = DMA_FROM_DEVICE;
222 
223 	map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
224 	if (dma_mapping_error(&qdev->pdev->dev, map)) {
225 		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
226 		return -ENOMEM;
227 	}
228 
229 	status = qlge_sem_spinlock(qdev, SEM_ICB_MASK);
230 	if (status)
231 		goto lock_failed;
232 
233 	status = qlge_wait_cfg(qdev, bit);
234 	if (status) {
235 		netif_err(qdev, ifup, qdev->ndev,
236 			  "Timed out waiting for CFG to come ready.\n");
237 		goto exit;
238 	}
239 
240 	qlge_write32(qdev, ICB_L, (u32)map);
241 	qlge_write32(qdev, ICB_H, (u32)(map >> 32));
242 
243 	mask = CFG_Q_MASK | (bit << 16);
244 	value = bit | (q_id << CFG_Q_SHIFT);
245 	qlge_write32(qdev, CFG, (mask | value));
246 
247 	/*
248 	 * Wait for the bit to clear after signaling hw.
249 	 */
250 	status = qlge_wait_cfg(qdev, bit);
251 exit:
252 	qlge_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
253 lock_failed:
254 	dma_unmap_single(&qdev->pdev->dev, map, size, direction);
255 	return status;
256 }
257 
258 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index,
260 			  u32 *value)
261 {
262 	u32 offset = 0;
263 	int status;
264 
265 	switch (type) {
266 	case MAC_ADDR_TYPE_MULTI_MAC:
267 	case MAC_ADDR_TYPE_CAM_MAC: {
268 		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
269 		if (status)
270 			break;
271 		qlge_write32(qdev, MAC_ADDR_IDX,
272 			     (offset++) | /* offset */
273 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
274 				   MAC_ADDR_ADR | MAC_ADDR_RS |
275 				   type); /* type */
276 		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
277 		if (status)
278 			break;
279 		*value++ = qlge_read32(qdev, MAC_ADDR_DATA);
280 		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
281 		if (status)
282 			break;
283 		qlge_write32(qdev, MAC_ADDR_IDX,
284 			     (offset++) | /* offset */
285 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
286 				   MAC_ADDR_ADR | MAC_ADDR_RS |
287 				   type); /* type */
288 		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
289 		if (status)
290 			break;
291 		*value++ = qlge_read32(qdev, MAC_ADDR_DATA);
292 		if (type == MAC_ADDR_TYPE_CAM_MAC) {
293 			status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
294 						   MAC_ADDR_MW, 0);
295 			if (status)
296 				break;
297 			qlge_write32(qdev, MAC_ADDR_IDX,
298 				     (offset++) | /* offset */
299 					   (index
300 					    << MAC_ADDR_IDX_SHIFT) | /* index */
301 					   MAC_ADDR_ADR |
302 					   MAC_ADDR_RS | type); /* type */
303 			status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
304 						   MAC_ADDR_MR, 0);
305 			if (status)
306 				break;
307 			*value++ = qlge_read32(qdev, MAC_ADDR_DATA);
308 		}
309 		break;
310 	}
311 	case MAC_ADDR_TYPE_VLAN:
312 	case MAC_ADDR_TYPE_MULTI_FLTR:
313 	default:
314 		netif_crit(qdev, ifup, qdev->ndev,
315 			   "Address type %d not yet supported.\n", type);
316 		status = -EPERM;
317 	}
318 	return status;
319 }
320 
321 /* Set up a MAC, multicast or VLAN address for the
322  * inbound frame matching.
323  */
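/* A CAM MAC entry is programmed with three consecutive writes: the
 * lower 32 bits of the address, the upper 16 bits, and finally an
 * output word (cam_output) that routes matching frames to the NIC
 * core.
 */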
static int qlge_set_mac_addr_reg(struct qlge_adapter *qdev, const u8 *addr,
325 				 u32 type, u16 index)
326 {
327 	u32 offset = 0;
328 	int status = 0;
329 
330 	switch (type) {
331 	case MAC_ADDR_TYPE_MULTI_MAC: {
332 		u32 upper = (addr[0] << 8) | addr[1];
333 		u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
334 			    (addr[5]);
335 
336 		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
337 		if (status)
338 			break;
339 		qlge_write32(qdev, MAC_ADDR_IDX,
340 			     (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
341 				   MAC_ADDR_E);
342 		qlge_write32(qdev, MAC_ADDR_DATA, lower);
343 		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
344 		if (status)
345 			break;
346 		qlge_write32(qdev, MAC_ADDR_IDX,
347 			     (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
348 				   MAC_ADDR_E);
349 
350 		qlge_write32(qdev, MAC_ADDR_DATA, upper);
351 		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
352 		break;
353 	}
354 	case MAC_ADDR_TYPE_CAM_MAC: {
355 		u32 cam_output;
356 		u32 upper = (addr[0] << 8) | addr[1];
357 		u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
358 			    (addr[5]);
359 		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
360 		if (status)
361 			break;
362 		qlge_write32(qdev, MAC_ADDR_IDX,
363 			     (offset++) | /* offset */
364 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
365 				   type); /* type */
366 		qlge_write32(qdev, MAC_ADDR_DATA, lower);
367 		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
368 		if (status)
369 			break;
370 		qlge_write32(qdev, MAC_ADDR_IDX,
371 			     (offset++) | /* offset */
372 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
373 				   type); /* type */
374 		qlge_write32(qdev, MAC_ADDR_DATA, upper);
375 		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
376 		if (status)
377 			break;
378 		qlge_write32(qdev, MAC_ADDR_IDX,
379 			     (offset) | /* offset */
380 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
381 				   type); /* type */
382 		/* This field should also include the queue id
383 		 * and possibly the function id.  Right now we hardcode
384 		 * the route field to NIC core.
385 		 */
386 		cam_output = (CAM_OUT_ROUTE_NIC |
387 			      (qdev->func << CAM_OUT_FUNC_SHIFT) |
388 			      (0 << CAM_OUT_CQ_ID_SHIFT));
389 		if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
390 			cam_output |= CAM_OUT_RV;
391 		/* route to NIC core */
392 		qlge_write32(qdev, MAC_ADDR_DATA, cam_output);
393 		break;
394 	}
395 	case MAC_ADDR_TYPE_VLAN: {
396 		u32 enable_bit = *((u32 *)&addr[0]);
397 		/* For VLAN, the addr actually holds a bit that
398 		 * either enables or disables the vlan id we are
399 		 * addressing. It's either MAC_ADDR_E on or off.
400 		 * That's bit-27 we're talking about.
401 		 */
402 		status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
403 		if (status)
404 			break;
405 		qlge_write32(qdev, MAC_ADDR_IDX,
406 			     offset | /* offset */
407 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
408 				   type | /* type */
409 				   enable_bit); /* enable/disable */
410 		break;
411 	}
412 	case MAC_ADDR_TYPE_MULTI_FLTR:
413 	default:
414 		netif_crit(qdev, ifup, qdev->ndev,
415 			   "Address type %d not yet supported.\n", type);
416 		status = -EPERM;
417 	}
418 	return status;
419 }
420 
421 /* Set or clear MAC address in hardware. We sometimes
422  * have to clear it to prevent wrong frame routing
423  * especially in a bonding environment.
424  */
static int qlge_set_mac_addr(struct qlge_adapter *qdev, int set)
426 {
427 	int status;
428 	char zero_mac_addr[ETH_ALEN];
429 	char *addr;
430 
431 	if (set) {
432 		addr = &qdev->current_mac_addr[0];
433 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
434 			     "Set Mac addr %pM\n", addr);
435 	} else {
436 		eth_zero_addr(zero_mac_addr);
437 		addr = &zero_mac_addr[0];
438 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
439 			     "Clearing MAC address\n");
440 	}
441 	status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
442 	if (status)
443 		return status;
444 	status = qlge_set_mac_addr_reg(qdev, (const u8 *)addr,
445 				       MAC_ADDR_TYPE_CAM_MAC,
446 				       qdev->func * MAX_CQ);
447 	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
448 	if (status)
449 		netif_err(qdev, ifup, qdev->ndev,
450 			  "Failed to init mac address.\n");
451 	return status;
452 }
453 
void qlge_link_on(struct qlge_adapter *qdev)
455 {
456 	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
457 	netif_carrier_on(qdev->ndev);
458 	qlge_set_mac_addr(qdev, 1);
459 }
460 
void qlge_link_off(struct qlge_adapter *qdev)
462 {
463 	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
464 	netif_carrier_off(qdev->ndev);
465 	qlge_set_mac_addr(qdev, 0);
466 }
467 
468 /* Get a specific frame routing value from the CAM.
469  * Used for debug and reg dump.
470  */
int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value)
472 {
473 	int status = 0;
474 
475 	status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
476 	if (status)
477 		goto exit;
478 
479 	qlge_write32(qdev, RT_IDX,
480 		     RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
481 	status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
482 	if (status)
483 		goto exit;
484 	*value = qlge_read32(qdev, RT_DATA);
485 exit:
486 	return status;
487 }
488 
489 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
490  * to route different frame types to various inbound queues.  We send broadcast/
491  * multicast/error frames to the default queue for slow handling,
492  * and CAM hit/RSS frames to the fast handling queues.
493  */
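/* A slot is selected and enabled through RT_IDX (destination, type,
 * index and the E bit), while RT_DATA holds the frame-type mask the
 * slot matches on.
 */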
static int qlge_set_routing_reg(struct qlge_adapter *qdev, u32 index, u32 mask,
495 				int enable)
496 {
497 	int status = -EINVAL; /* Return error if no mask match. */
498 	u32 value = 0;
499 
500 	switch (mask) {
501 	case RT_IDX_CAM_HIT:
502 		{
503 			value = RT_IDX_DST_CAM_Q |	/* dest */
504 			    RT_IDX_TYPE_NICQ |	/* type */
505 			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
506 			break;
507 		}
508 	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
509 		{
510 			value = RT_IDX_DST_DFLT_Q |	/* dest */
511 			    RT_IDX_TYPE_NICQ |	/* type */
512 			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
513 			break;
514 		}
515 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
516 		{
517 			value = RT_IDX_DST_DFLT_Q |	/* dest */
518 			    RT_IDX_TYPE_NICQ |	/* type */
519 			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
520 			break;
521 		}
522 	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
523 		{
524 			value = RT_IDX_DST_DFLT_Q | /* dest */
525 				RT_IDX_TYPE_NICQ | /* type */
526 				(RT_IDX_IP_CSUM_ERR_SLOT <<
527 				RT_IDX_IDX_SHIFT); /* index */
528 			break;
529 		}
530 	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
531 		{
532 			value = RT_IDX_DST_DFLT_Q | /* dest */
533 				RT_IDX_TYPE_NICQ | /* type */
534 				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
535 				RT_IDX_IDX_SHIFT); /* index */
536 			break;
537 		}
538 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
539 		{
540 			value = RT_IDX_DST_DFLT_Q |	/* dest */
541 			    RT_IDX_TYPE_NICQ |	/* type */
542 			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
543 			break;
544 		}
545 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
546 		{
547 			value = RT_IDX_DST_DFLT_Q |	/* dest */
548 			    RT_IDX_TYPE_NICQ |	/* type */
549 			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
550 			break;
551 		}
552 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
553 		{
554 			value = RT_IDX_DST_DFLT_Q |	/* dest */
555 			    RT_IDX_TYPE_NICQ |	/* type */
556 			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
557 			break;
558 		}
559 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
560 		{
561 			value = RT_IDX_DST_RSS |	/* dest */
562 			    RT_IDX_TYPE_NICQ |	/* type */
563 			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
564 			break;
565 		}
566 	case 0:		/* Clear the E-bit on an entry. */
567 		{
568 			value = RT_IDX_DST_DFLT_Q |	/* dest */
569 			    RT_IDX_TYPE_NICQ |	/* type */
570 			    (index << RT_IDX_IDX_SHIFT);/* index */
571 			break;
572 		}
573 	default:
574 		netif_err(qdev, ifup, qdev->ndev,
575 			  "Mask type %d not yet supported.\n", mask);
576 		status = -EPERM;
577 		goto exit;
578 	}
579 
580 	if (value) {
581 		status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
582 		if (status)
583 			goto exit;
584 		value |= (enable ? RT_IDX_E : 0);
585 		qlge_write32(qdev, RT_IDX, value);
586 		qlge_write32(qdev, RT_DATA, enable ? mask : 0);
587 	}
588 exit:
589 	return status;
590 }
591 
static void qlge_enable_interrupts(struct qlge_adapter *qdev)
593 {
594 	qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
595 }
596 
static void qlge_disable_interrupts(struct qlge_adapter *qdev)
598 {
599 	qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
600 }
601 
static void qlge_enable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
603 {
604 	struct intr_context *ctx = &qdev->intr_context[intr];
605 
606 	qlge_write32(qdev, INTR_EN, ctx->intr_en_mask);
607 }
608 
static void qlge_disable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
610 {
611 	struct intr_context *ctx = &qdev->intr_context[intr];
612 
613 	qlge_write32(qdev, INTR_EN, ctx->intr_dis_mask);
614 }
615 
static void qlge_enable_all_completion_interrupts(struct qlge_adapter *qdev)
617 {
618 	int i;
619 
620 	for (i = 0; i < qdev->intr_count; i++)
621 		qlge_enable_completion_interrupt(qdev, i);
622 }
623 
static int qlge_validate_flash(struct qlge_adapter *qdev, u32 size, const char *str)
625 {
626 	int status, i;
627 	u16 csum = 0;
628 	__le16 *flash = (__le16 *)&qdev->flash;
629 
630 	status = strncmp((char *)&qdev->flash, str, 4);
631 	if (status) {
632 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
633 		return	status;
634 	}
635 
636 	for (i = 0; i < size; i++)
637 		csum += le16_to_cpu(*flash++);
638 
639 	if (csum)
640 		netif_err(qdev, ifup, qdev->ndev,
641 			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);
642 
643 	return csum;
644 }
645 
static int qlge_read_flash_word(struct qlge_adapter *qdev, int offset, __le32 *data)
647 {
648 	int status = 0;
649 	/* wait for reg to come ready */
650 	status = qlge_wait_reg_rdy(qdev,
651 				   FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
652 	if (status)
653 		goto exit;
654 	/* set up for reg read */
655 	qlge_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
656 	/* wait for reg to come ready */
657 	status = qlge_wait_reg_rdy(qdev,
658 				   FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
659 	if (status)
660 		goto exit;
661 	/* This data is stored on flash as an array of
662 	 * __le32.  Since qlge_read32() returns cpu endian
663 	 * we need to swap it back.
664 	 */
665 	*data = cpu_to_le32(qlge_read32(qdev, FLASH_DATA));
666 exit:
667 	return status;
668 }
669 
static int qlge_get_8000_flash_params(struct qlge_adapter *qdev)
671 {
672 	u32 i, size;
673 	int status;
674 	__le32 *p = (__le32 *)&qdev->flash;
675 	u32 offset;
676 	u8 mac_addr[6];
677 
678 	/* Get flash offset for function and adjust
679 	 * for dword access.
680 	 */
681 	if (!qdev->port)
682 		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
683 	else
684 		offset = FUNC1_FLASH_OFFSET / sizeof(u32);
685 
686 	if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
687 		return -ETIMEDOUT;
688 
689 	size = sizeof(struct flash_params_8000) / sizeof(u32);
690 	for (i = 0; i < size; i++, p++) {
691 		status = qlge_read_flash_word(qdev, i + offset, p);
692 		if (status) {
693 			netif_err(qdev, ifup, qdev->ndev,
694 				  "Error reading flash.\n");
695 			goto exit;
696 		}
697 	}
698 
699 	status = qlge_validate_flash(qdev,
700 				     sizeof(struct flash_params_8000) /
701 				   sizeof(u16),
702 				   "8000");
703 	if (status) {
704 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
705 		status = -EINVAL;
706 		goto exit;
707 	}
708 
709 	/* Extract either manufacturer or BOFM modified
710 	 * MAC address.
711 	 */
712 	if (qdev->flash.flash_params_8000.data_type1 == 2)
713 		memcpy(mac_addr,
714 		       qdev->flash.flash_params_8000.mac_addr1,
715 		       qdev->ndev->addr_len);
716 	else
717 		memcpy(mac_addr,
718 		       qdev->flash.flash_params_8000.mac_addr,
719 		       qdev->ndev->addr_len);
720 
721 	if (!is_valid_ether_addr(mac_addr)) {
722 		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
723 		status = -EINVAL;
724 		goto exit;
725 	}
726 
727 	eth_hw_addr_set(qdev->ndev, mac_addr);
728 
729 exit:
730 	qlge_sem_unlock(qdev, SEM_FLASH_MASK);
731 	return status;
732 }
733 
static int qlge_get_8012_flash_params(struct qlge_adapter *qdev)
735 {
736 	int i;
737 	int status;
738 	__le32 *p = (__le32 *)&qdev->flash;
739 	u32 offset = 0;
740 	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
741 
742 	/* Second function's parameters follow the first
743 	 * function's.
744 	 */
745 	if (qdev->port)
746 		offset = size;
747 
748 	if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
749 		return -ETIMEDOUT;
750 
751 	for (i = 0; i < size; i++, p++) {
752 		status = qlge_read_flash_word(qdev, i + offset, p);
753 		if (status) {
754 			netif_err(qdev, ifup, qdev->ndev,
755 				  "Error reading flash.\n");
756 			goto exit;
757 		}
758 	}
759 
760 	status = qlge_validate_flash(qdev,
761 				     sizeof(struct flash_params_8012) /
762 				       sizeof(u16),
763 				     "8012");
764 	if (status) {
765 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
766 		status = -EINVAL;
767 		goto exit;
768 	}
769 
770 	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
771 		status = -EINVAL;
772 		goto exit;
773 	}
774 
775 	eth_hw_addr_set(qdev->ndev, qdev->flash.flash_params_8012.mac_addr);
776 
777 exit:
778 	qlge_sem_unlock(qdev, SEM_FLASH_MASK);
779 	return status;
780 }
781 
/* xgmac registers are located behind the xgmac_addr and xgmac_data
783  * register pair.  Each read/write requires us to wait for the ready
784  * bit before reading/writing the data.
785  */
static int qlge_write_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 data)
787 {
788 	int status;
789 	/* wait for reg to come ready */
790 	status = qlge_wait_reg_rdy(qdev,
791 				   XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
792 	if (status)
793 		return status;
794 	/* write the data to the data reg */
795 	qlge_write32(qdev, XGMAC_DATA, data);
796 	/* trigger the write */
797 	qlge_write32(qdev, XGMAC_ADDR, reg);
798 	return status;
799 }
800 
/* xgmac registers are located behind the xgmac_addr and xgmac_data
802  * register pair.  Each read/write requires us to wait for the ready
803  * bit before reading/writing the data.
804  */
int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
806 {
807 	int status = 0;
808 	/* wait for reg to come ready */
809 	status = qlge_wait_reg_rdy(qdev,
810 				   XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
811 	if (status)
812 		goto exit;
813 	/* set up for reg read */
814 	qlge_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
815 	/* wait for reg to come ready */
816 	status = qlge_wait_reg_rdy(qdev,
817 				   XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
818 	if (status)
819 		goto exit;
820 	/* get the data */
821 	*data = qlge_read32(qdev, XGMAC_DATA);
822 exit:
823 	return status;
824 }
825 
826 /* This is used for reading the 64-bit statistics regs. */
int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data)
828 {
829 	int status = 0;
830 	u32 hi = 0;
831 	u32 lo = 0;
832 
833 	status = qlge_read_xgmac_reg(qdev, reg, &lo);
834 	if (status)
835 		goto exit;
836 
837 	status = qlge_read_xgmac_reg(qdev, reg + 4, &hi);
838 	if (status)
839 		goto exit;
840 
841 	*data = (u64)lo | ((u64)hi << 32);
842 
843 exit:
844 	return status;
845 }
846 
static int qlge_8000_port_initialize(struct qlge_adapter *qdev)
848 {
849 	int status;
850 	/*
851 	 * Get MPI firmware version for driver banner
	 * and ethtool info.
853 	 */
854 	status = qlge_mb_about_fw(qdev);
855 	if (status)
856 		goto exit;
857 	status = qlge_mb_get_fw_state(qdev);
858 	if (status)
859 		goto exit;
860 	/* Wake up a worker to get/set the TX/RX frame sizes. */
861 	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
862 exit:
863 	return status;
864 }
865 
866 /* Take the MAC Core out of reset.
867  * Enable statistics counting.
868  * Take the transmitter/receiver out of reset.
869  * This functionality may be done in the MPI firmware at a
870  * later date.
871  */
static int qlge_8012_port_initialize(struct qlge_adapter *qdev)
873 {
874 	int status = 0;
875 	u32 data;
876 
877 	if (qlge_sem_trylock(qdev, qdev->xg_sem_mask)) {
878 		/* Another function has the semaphore, so
879 		 * wait for the port init bit to come ready.
880 		 */
881 		netif_info(qdev, link, qdev->ndev,
882 			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
883 		status = qlge_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
884 		if (status) {
885 			netif_crit(qdev, link, qdev->ndev,
886 				   "Port initialize timed out.\n");
887 		}
888 		return status;
889 	}
890 
	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
892 	/* Set the core reset. */
893 	status = qlge_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
894 	if (status)
895 		goto end;
896 	data |= GLOBAL_CFG_RESET;
897 	status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
898 	if (status)
899 		goto end;
900 
901 	/* Clear the core reset and turn on jumbo for receiver. */
902 	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
903 	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
904 	data |= GLOBAL_CFG_TX_STAT_EN;
905 	data |= GLOBAL_CFG_RX_STAT_EN;
906 	status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
907 	if (status)
908 		goto end;
909 
	/* Enable the transmitter and clear its reset. */
911 	status = qlge_read_xgmac_reg(qdev, TX_CFG, &data);
912 	if (status)
913 		goto end;
914 	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
915 	data |= TX_CFG_EN;	/* Enable the transmitter. */
916 	status = qlge_write_xgmac_reg(qdev, TX_CFG, data);
917 	if (status)
918 		goto end;
919 
	/* Enable the receiver and clear its reset. */
921 	status = qlge_read_xgmac_reg(qdev, RX_CFG, &data);
922 	if (status)
923 		goto end;
924 	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
925 	data |= RX_CFG_EN;	/* Enable the receiver. */
926 	status = qlge_write_xgmac_reg(qdev, RX_CFG, data);
927 	if (status)
928 		goto end;
929 
930 	/* Turn on jumbo. */
931 	status =
932 	    qlge_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
933 	if (status)
934 		goto end;
935 	status =
936 	    qlge_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
937 	if (status)
938 		goto end;
939 
940 	/* Signal to the world that the port is enabled.        */
941 	qlge_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
942 end:
943 	qlge_sem_unlock(qdev, qdev->xg_sem_mask);
944 	return status;
945 }
946 
static inline unsigned int qlge_lbq_block_size(struct qlge_adapter *qdev)
948 {
949 	return PAGE_SIZE << qdev->lbq_buf_order;
950 }
951 
static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
953 {
954 	struct qlge_bq_desc *bq_desc;
955 
956 	bq_desc = &bq->queue[bq->next_to_clean];
957 	bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
958 
959 	return bq_desc;
960 }
961 
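/* Large receive buffers are sub-chunks of a "master" compound page of
 * qlge_lbq_block_size() bytes; the chunk that reaches the end of that
 * page is the one that unmaps the whole block.
 */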
static struct qlge_bq_desc *qlge_get_curr_lchunk(struct qlge_adapter *qdev,
963 						 struct rx_ring *rx_ring)
964 {
965 	struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
966 
967 	dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
968 				qdev->lbq_buf_size, DMA_FROM_DEVICE);
969 
970 	if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
971 	    qlge_lbq_block_size(qdev)) {
972 		/* last chunk of the master page */
973 		dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
974 			       qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
975 	}
976 
977 	return lbq_desc;
978 }
979 
980 /* Update an rx ring index. */
static void qlge_update_cq(struct rx_ring *rx_ring)
982 {
983 	rx_ring->cnsmr_idx++;
984 	rx_ring->curr_entry++;
985 	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
986 		rx_ring->cnsmr_idx = 0;
987 		rx_ring->curr_entry = rx_ring->cq_base;
988 	}
989 }
990 
static void qlge_write_cq_idx(struct rx_ring *rx_ring)
992 {
993 	qlge_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
994 }
995 
996 static const char * const bq_type_name[] = {
997 	[QLGE_SB] = "sbq",
998 	[QLGE_LB] = "lbq",
999 };
1000 
1001 /* return 0 or negative error */
static int qlge_refill_sb(struct rx_ring *rx_ring,
1003 			  struct qlge_bq_desc *sbq_desc, gfp_t gfp)
1004 {
1005 	struct qlge_adapter *qdev = rx_ring->qdev;
1006 	struct sk_buff *skb;
1007 
1008 	if (sbq_desc->p.skb)
1009 		return 0;
1010 
1011 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1012 		     "ring %u sbq: getting new skb for index %d.\n",
1013 		     rx_ring->cq_id, sbq_desc->index);
1014 
1015 	skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
1016 	if (!skb)
1017 		return -ENOMEM;
1018 	skb_reserve(skb, QLGE_SB_PAD);
1019 
1020 	sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
1021 					    SMALL_BUF_MAP_SIZE,
1022 					    DMA_FROM_DEVICE);
1023 	if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
1024 		netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
1025 		dev_kfree_skb_any(skb);
1026 		return -EIO;
1027 	}
1028 	*sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
1029 
1030 	sbq_desc->p.skb = skb;
1031 	return 0;
1032 }
1033 
1034 /* return 0 or negative error */
static int qlge_refill_lb(struct rx_ring *rx_ring,
1036 			  struct qlge_bq_desc *lbq_desc, gfp_t gfp)
1037 {
1038 	struct qlge_adapter *qdev = rx_ring->qdev;
1039 	struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
1040 
1041 	if (!master_chunk->page) {
1042 		struct page *page;
1043 		dma_addr_t dma_addr;
1044 
1045 		page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
1046 		if (unlikely(!page))
1047 			return -ENOMEM;
1048 		dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
1049 					qlge_lbq_block_size(qdev),
1050 					DMA_FROM_DEVICE);
1051 		if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
1052 			__free_pages(page, qdev->lbq_buf_order);
1053 			netif_err(qdev, drv, qdev->ndev,
1054 				  "PCI mapping failed.\n");
1055 			return -EIO;
1056 		}
1057 		master_chunk->page = page;
1058 		master_chunk->va = page_address(page);
1059 		master_chunk->offset = 0;
1060 		rx_ring->chunk_dma_addr = dma_addr;
1061 	}
1062 
1063 	lbq_desc->p.pg_chunk = *master_chunk;
1064 	lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
1065 	*lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
1066 					 lbq_desc->p.pg_chunk.offset);
1067 
1068 	/* Adjust the master page chunk for next
1069 	 * buffer get.
1070 	 */
1071 	master_chunk->offset += qdev->lbq_buf_size;
1072 	if (master_chunk->offset == qlge_lbq_block_size(qdev)) {
1073 		master_chunk->page = NULL;
1074 	} else {
1075 		master_chunk->va += qdev->lbq_buf_size;
1076 		get_page(master_chunk->page);
1077 	}
1078 
1079 	return 0;
1080 }
1081 
1082 /* return 0 or negative error */
static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
1084 {
1085 	struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
1086 	struct qlge_adapter *qdev = rx_ring->qdev;
1087 	struct qlge_bq_desc *bq_desc;
1088 	int refill_count;
1089 	int retval;
1090 	int i;
1091 
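	/* Count the free slots between next_to_use and the slot just
	 * before next_to_clean; QLGE_BQ_ALIGN() is assumed here to round
	 * the target index down to the ring's update granularity.
	 */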
1092 	refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
1093 				    bq->next_to_use);
1094 	if (!refill_count)
1095 		return 0;
1096 
1097 	i = bq->next_to_use;
1098 	bq_desc = &bq->queue[i];
1099 	i -= QLGE_BQ_LEN;
1100 	do {
1101 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1102 			     "ring %u %s: try cleaning idx %d\n",
1103 			     rx_ring->cq_id, bq_type_name[bq->type], i);
1104 
1105 		if (bq->type == QLGE_SB)
1106 			retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
1107 		else
1108 			retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
1109 		if (retval < 0) {
1110 			netif_err(qdev, ifup, qdev->ndev,
1111 				  "ring %u %s: Could not get a page chunk, idx %d\n",
1112 				  rx_ring->cq_id, bq_type_name[bq->type], i);
1113 			break;
1114 		}
1115 
1116 		bq_desc++;
1117 		i++;
1118 		if (unlikely(!i)) {
1119 			bq_desc = &bq->queue[0];
1120 			i -= QLGE_BQ_LEN;
1121 		}
1122 		refill_count--;
1123 	} while (refill_count);
1124 	i += QLGE_BQ_LEN;
1125 
1126 	if (bq->next_to_use != i) {
1127 		if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
1128 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1129 				     "ring %u %s: updating prod idx = %d.\n",
1130 				     rx_ring->cq_id, bq_type_name[bq->type],
1131 				     i);
1132 			qlge_write_db_reg(i, bq->prod_idx_db_reg);
1133 		}
1134 		bq->next_to_use = i;
1135 	}
1136 
1137 	return retval;
1138 }
1139 
static void qlge_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
1141 				      unsigned long delay)
1142 {
1143 	bool sbq_fail, lbq_fail;
1144 
1145 	sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
1146 	lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
1147 
1148 	/* Minimum number of buffers needed to be able to receive at least one
1149 	 * frame of any format:
1150 	 * sbq: 1 for header + 1 for data
1151 	 * lbq: mtu 9000 / lb size
1152 	 * Below this, the queue might stall.
1153 	 */
1154 	if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
1155 	    (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
1156 	     DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
1157 		/* Allocations can take a long time in certain cases (ex.
1158 		 * reclaim). Therefore, use a workqueue for long-running
1159 		 * work items.
1160 		 */
1161 		queue_delayed_work_on(smp_processor_id(), system_long_wq,
1162 				      &rx_ring->refill_work, delay);
1163 }
1164 
static void qlge_slow_refill(struct work_struct *work)
1166 {
1167 	struct rx_ring *rx_ring = container_of(work, struct rx_ring,
1168 					       refill_work.work);
1169 	struct napi_struct *napi = &rx_ring->napi;
1170 
1171 	napi_disable(napi);
1172 	qlge_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
1173 	napi_enable(napi);
1174 
1175 	local_bh_disable();
1176 	/* napi_disable() might have prevented incomplete napi work from being
1177 	 * rescheduled.
1178 	 */
1179 	napi_schedule(napi);
1180 	/* trigger softirq processing */
1181 	local_bh_enable();
1182 }
1183 
1184 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1185  * fails at some stage, or from the interrupt when a tx completes.
1186  */
static void qlge_unmap_send(struct qlge_adapter *qdev,
1188 			    struct tx_ring_desc *tx_ring_desc, int mapped)
1189 {
1190 	int i;
1191 
1192 	for (i = 0; i < mapped; i++) {
1193 		if (i == 0 || (i == 7 && mapped > 7)) {
1194 			/*
1195 			 * Unmap the skb->data area, or the
1196 			 * external sglist (AKA the Outbound
1197 			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
1202 			 */
1203 			if (i == 7) {
1204 				netif_printk(qdev, tx_done, KERN_DEBUG,
1205 					     qdev->ndev,
1206 					     "unmapping OAL area.\n");
1207 			}
1208 			dma_unmap_single(&qdev->pdev->dev,
1209 					 dma_unmap_addr(&tx_ring_desc->map[i],
1210 							mapaddr),
1211 					 dma_unmap_len(&tx_ring_desc->map[i],
1212 						       maplen),
1213 					 DMA_TO_DEVICE);
1214 		} else {
1215 			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1216 				     "unmapping frag %d.\n", i);
1217 			dma_unmap_page(&qdev->pdev->dev,
1218 				       dma_unmap_addr(&tx_ring_desc->map[i],
1219 						      mapaddr),
1220 				       dma_unmap_len(&tx_ring_desc->map[i],
1221 						     maplen), DMA_TO_DEVICE);
1222 		}
1223 	}
1224 }
1225 
1226 /* Map the buffers for this transmit.  This will return
1227  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1228  */
static int qlge_map_send(struct qlge_adapter *qdev,
1230 			 struct qlge_ob_mac_iocb_req *mac_iocb_ptr,
1231 			 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1232 {
1233 	int len = skb_headlen(skb);
1234 	dma_addr_t map;
1235 	int frag_idx, err, map_idx = 0;
1236 	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1237 	int frag_cnt = skb_shinfo(skb)->nr_frags;
1238 
1239 	if (frag_cnt) {
1240 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1241 			     "frag_cnt = %d.\n", frag_cnt);
1242 	}
1243 	/*
1244 	 * Map the skb buffer first.
1245 	 */
1246 	map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
1247 
1248 	err = dma_mapping_error(&qdev->pdev->dev, map);
1249 	if (err) {
1250 		netif_err(qdev, tx_queued, qdev->ndev,
1251 			  "PCI mapping failed with error: %d\n", err);
1252 
1253 		return NETDEV_TX_BUSY;
1254 	}
1255 
1256 	tbd->len = cpu_to_le32(len);
1257 	tbd->addr = cpu_to_le64(map);
1258 	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1259 	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1260 	map_idx++;
1261 
1262 	/*
1263 	 * This loop fills the remainder of the 8 address descriptors
1264 	 * in the IOCB.  If there are more than 7 fragments, then the
1265 	 * eighth address desc will point to an external list (OAL).
1266 	 * When this happens, the remainder of the frags will be stored
1267 	 * in this list.
1268 	 */
1269 	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1270 		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1271 
1272 		tbd++;
1273 		if (frag_idx == 6 && frag_cnt > 7) {
1274 			/* Let's tack on an sglist.
1275 			 * Our control block will now
1276 			 * look like this:
1277 			 * iocb->seg[0] = skb->data
1278 			 * iocb->seg[1] = frag[0]
1279 			 * iocb->seg[2] = frag[1]
1280 			 * iocb->seg[3] = frag[2]
1281 			 * iocb->seg[4] = frag[3]
1282 			 * iocb->seg[5] = frag[4]
1283 			 * iocb->seg[6] = frag[5]
1284 			 * iocb->seg[7] = ptr to OAL (external sglist)
1285 			 * oal->seg[0] = frag[6]
1286 			 * oal->seg[1] = frag[7]
1287 			 * oal->seg[2] = frag[8]
1288 			 * oal->seg[3] = frag[9]
1289 			 * oal->seg[4] = frag[10]
1290 			 *      etc...
1291 			 */
1292 			/* Tack on the OAL in the eighth segment of IOCB. */
1293 			map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
1294 					     sizeof(struct qlge_oal),
1295 					     DMA_TO_DEVICE);
1296 			err = dma_mapping_error(&qdev->pdev->dev, map);
1297 			if (err) {
1298 				netif_err(qdev, tx_queued, qdev->ndev,
1299 					  "PCI mapping outbound address list with error: %d\n",
1300 					  err);
1301 				goto map_error;
1302 			}
1303 
1304 			tbd->addr = cpu_to_le64(map);
1305 			/*
1306 			 * The length is the number of fragments
1307 			 * that remain to be mapped times the length
1308 			 * of our sglist (OAL).
1309 			 */
1310 			tbd->len =
1311 			    cpu_to_le32((sizeof(struct tx_buf_desc) *
1312 					 (frag_cnt - frag_idx)) | TX_DESC_C);
1313 			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1314 					   map);
1315 			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1316 					  sizeof(struct qlge_oal));
1317 			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1318 			map_idx++;
1319 		}
1320 
1321 		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1322 				       DMA_TO_DEVICE);
1323 
1324 		err = dma_mapping_error(&qdev->pdev->dev, map);
1325 		if (err) {
1326 			netif_err(qdev, tx_queued, qdev->ndev,
1327 				  "PCI mapping frags failed with error: %d.\n",
1328 				  err);
1329 			goto map_error;
1330 		}
1331 
1332 		tbd->addr = cpu_to_le64(map);
1333 		tbd->len = cpu_to_le32(skb_frag_size(frag));
1334 		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1335 		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1336 				  skb_frag_size(frag));
1337 	}
1338 	/* Save the number of segments we've mapped. */
1339 	tx_ring_desc->map_cnt = map_idx;
1340 	/* Terminate the last segment. */
1341 	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1342 	return NETDEV_TX_OK;
1343 
1344 map_error:
1345 	/*
1346 	 * If the first frag mapping failed, then i will be zero.
1347 	 * This causes the unmap of the skb->data area.  Otherwise
1348 	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
1350 	 */
1351 	qlge_unmap_send(qdev, tx_ring_desc, map_idx);
1352 	return NETDEV_TX_BUSY;
1353 }
1354 
1355 /* Categorizing receive firmware frame errors */
static void qlge_categorize_rx_err(struct qlge_adapter *qdev, u8 rx_err,
1357 				   struct rx_ring *rx_ring)
1358 {
1359 	struct nic_stats *stats = &qdev->nic_stats;
1360 
1361 	stats->rx_err_count++;
1362 	rx_ring->rx_errors++;
1363 
1364 	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1365 	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1366 		stats->rx_code_err++;
1367 		break;
1368 	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1369 		stats->rx_oversize_err++;
1370 		break;
1371 	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1372 		stats->rx_undersize_err++;
1373 		break;
1374 	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1375 		stats->rx_preamble_err++;
1376 		break;
1377 	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1378 		stats->rx_frame_len_err++;
1379 		break;
1380 	case IB_MAC_IOCB_RSP_ERR_CRC:
1381 		stats->rx_crc_err++;
1382 		break;
1383 	default:
1384 		break;
1385 	}
1386 }
1387 
1388 /*
1389  * qlge_update_mac_hdr_len - helper routine to update the mac header length
1390  * based on vlan tags if present
1391  */
static void qlge_update_mac_hdr_len(struct qlge_adapter *qdev,
1393 				    struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1394 				    void *page, size_t *len)
1395 {
1396 	u16 *tags;
1397 
1398 	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1399 		return;
1400 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1401 		tags = (u16 *)page;
1402 		/* Look for stacked vlan tags in ethertype field */
1403 		if (tags[6] == ETH_P_8021Q &&
1404 		    tags[8] == ETH_P_8021Q)
1405 			*len += 2 * VLAN_HLEN;
1406 		else
1407 			*len += VLAN_HLEN;
1408 	}
1409 }
1410 
1411 /* Process an inbound completion from an rx ring. */
static void qlge_process_mac_rx_gro_page(struct qlge_adapter *qdev,
1413 					 struct rx_ring *rx_ring,
1414 					 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1415 					 u32 length, u16 vlan_id)
1416 {
1417 	struct sk_buff *skb;
1418 	struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1419 	struct napi_struct *napi = &rx_ring->napi;
1420 
1421 	/* Frame error, so drop the packet. */
1422 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1423 		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1424 		put_page(lbq_desc->p.pg_chunk.page);
1425 		return;
1426 	}
1427 	napi->dev = qdev->ndev;
1428 
1429 	skb = napi_get_frags(napi);
1430 	if (!skb) {
1431 		netif_err(qdev, drv, qdev->ndev,
1432 			  "Couldn't get an skb, exiting.\n");
1433 		rx_ring->rx_dropped++;
1434 		put_page(lbq_desc->p.pg_chunk.page);
1435 		return;
1436 	}
1437 	prefetch(lbq_desc->p.pg_chunk.va);
1438 	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1439 			     lbq_desc->p.pg_chunk.page,
1440 			     lbq_desc->p.pg_chunk.offset,
1441 			     length);
1442 
1443 	skb->len += length;
1444 	skb->data_len += length;
1445 	skb->truesize += length;
1446 	skb_shinfo(skb)->nr_frags++;
1447 
1448 	rx_ring->rx_packets++;
1449 	rx_ring->rx_bytes += length;
1450 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1451 	skb_record_rx_queue(skb, rx_ring->cq_id);
1452 	if (vlan_id != 0xffff)
1453 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1454 	napi_gro_frags(napi);
1455 }
1456 
1457 /* Process an inbound completion from an rx ring. */
static void qlge_process_mac_rx_page(struct qlge_adapter *qdev,
1459 				     struct rx_ring *rx_ring,
1460 				     struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1461 				     u32 length, u16 vlan_id)
1462 {
1463 	struct net_device *ndev = qdev->ndev;
1464 	struct sk_buff *skb = NULL;
1465 	void *addr;
1466 	struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1467 	struct napi_struct *napi = &rx_ring->napi;
1468 	size_t hlen = ETH_HLEN;
1469 
1470 	skb = netdev_alloc_skb(ndev, length);
1471 	if (!skb) {
1472 		rx_ring->rx_dropped++;
1473 		put_page(lbq_desc->p.pg_chunk.page);
1474 		return;
1475 	}
1476 
1477 	addr = lbq_desc->p.pg_chunk.va;
1478 	prefetch(addr);
1479 
1480 	/* Frame error, so drop the packet. */
1481 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1482 		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1483 		goto err_out;
1484 	}
1485 
1486 	/* Update the MAC header length*/
1487 	qlge_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1488 
1489 	/* The max framesize filter on this chip is set higher than
1490 	 * MTU since FCoE uses 2k frames.
1491 	 */
1492 	if (skb->len > ndev->mtu + hlen) {
1493 		netif_err(qdev, drv, qdev->ndev,
1494 			  "Segment too small, dropping.\n");
1495 		rx_ring->rx_dropped++;
1496 		goto err_out;
1497 	}
1498 	skb_put_data(skb, addr, hlen);
1499 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1500 		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1501 		     length);
1502 	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1503 			   lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
1504 	skb->len += length - hlen;
1505 	skb->data_len += length - hlen;
1506 	skb->truesize += length - hlen;
1507 
1508 	rx_ring->rx_packets++;
1509 	rx_ring->rx_bytes += skb->len;
1510 	skb->protocol = eth_type_trans(skb, ndev);
1511 	skb_checksum_none_assert(skb);
1512 
1513 	if ((ndev->features & NETIF_F_RXCSUM) &&
1514 	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1515 		/* TCP frame. */
1516 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1517 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1518 				     "TCP checksum done!\n");
1519 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1520 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1521 			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1522 			/* Unfragmented ipv4 UDP frame. */
1523 			struct iphdr *iph =
1524 				(struct iphdr *)((u8 *)addr + hlen);
1525 			if (!(iph->frag_off &
1526 			      htons(IP_MF | IP_OFFSET))) {
1527 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1528 				netif_printk(qdev, rx_status, KERN_DEBUG,
1529 					     qdev->ndev,
1530 					     "UDP checksum done!\n");
1531 			}
1532 		}
1533 	}
1534 
1535 	skb_record_rx_queue(skb, rx_ring->cq_id);
1536 	if (vlan_id != 0xffff)
1537 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1538 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1539 		napi_gro_receive(napi, skb);
1540 	else
1541 		netif_receive_skb(skb);
1542 	return;
1543 err_out:
1544 	dev_kfree_skb_any(skb);
1545 	put_page(lbq_desc->p.pg_chunk.page);
1546 }
1547 
1548 /* Process an inbound completion from an rx ring. */
static void qlge_process_mac_rx_skb(struct qlge_adapter *qdev,
1550 				    struct rx_ring *rx_ring,
1551 				    struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1552 				    u32 length, u16 vlan_id)
1553 {
1554 	struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1555 	struct net_device *ndev = qdev->ndev;
1556 	struct sk_buff *skb, *new_skb;
1557 
1558 	skb = sbq_desc->p.skb;
1559 	/* Allocate new_skb and copy */
1560 	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1561 	if (!new_skb) {
1562 		rx_ring->rx_dropped++;
1563 		return;
1564 	}
1565 	skb_reserve(new_skb, NET_IP_ALIGN);
1566 
1567 	dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
1568 				SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1569 
1570 	skb_put_data(new_skb, skb->data, length);
1571 
1572 	skb = new_skb;
1573 
1574 	/* Frame error, so drop the packet. */
1575 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1576 		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1577 		dev_kfree_skb_any(skb);
1578 		return;
1579 	}
1580 
1581 	/* loopback self test for ethtool */
1582 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1583 		qlge_check_lb_frame(qdev, skb);
1584 		dev_kfree_skb_any(skb);
1585 		return;
1586 	}
1587 
1588 	/* The max framesize filter on this chip is set higher than
1589 	 * MTU since FCoE uses 2k frames.
1590 	 */
1591 	if (skb->len > ndev->mtu + ETH_HLEN) {
1592 		dev_kfree_skb_any(skb);
1593 		rx_ring->rx_dropped++;
1594 		return;
1595 	}
1596 
1597 	prefetch(skb->data);
1598 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1599 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1600 			     "%s Multicast.\n",
1601 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1602 			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1603 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1604 			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1605 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1606 			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1607 	}
1608 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1609 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1610 			     "Promiscuous Packet.\n");
1611 
1612 	rx_ring->rx_packets++;
1613 	rx_ring->rx_bytes += skb->len;
1614 	skb->protocol = eth_type_trans(skb, ndev);
1615 	skb_checksum_none_assert(skb);
1616 
1617 	/* If rx checksum is on, and there are no
1618 	 * csum or frame errors.
1619 	 */
1620 	if ((ndev->features & NETIF_F_RXCSUM) &&
1621 	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1622 		/* TCP frame. */
1623 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1624 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1625 				     "TCP checksum done!\n");
1626 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1627 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1628 			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1629 			/* Unfragmented ipv4 UDP frame. */
1630 			struct iphdr *iph = (struct iphdr *)skb->data;
1631 
1632 			if (!(iph->frag_off &
1633 			      htons(IP_MF | IP_OFFSET))) {
1634 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1635 				netif_printk(qdev, rx_status, KERN_DEBUG,
1636 					     qdev->ndev,
1637 					     "UDP checksum done!\n");
1638 			}
1639 		}
1640 	}
1641 
1642 	skb_record_rx_queue(skb, rx_ring->cq_id);
1643 	if (vlan_id != 0xffff)
1644 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1645 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1646 		napi_gro_receive(&rx_ring->napi, skb);
1647 	else
1648 		netif_receive_skb(skb);
1649 }
1650 
static void qlge_realign_skb(struct sk_buff *skb, int len)
1652 {
1653 	void *temp_addr = skb->data;
1654 
1655 	/* Undo the skb_reserve(skb,32) we did before
1656 	 * giving to hardware, and realign data on
1657 	 * a 2-byte boundary.
1658 	 */
1659 	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1660 	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1661 	memmove(skb->data, temp_addr, len);
1662 }
1663 
1664 /*
1665  * This function builds an skb for the given inbound
1666  * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
1668  */
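/* The completion may describe a header split into a small buffer
 * (IB_MAC_IOCB_RSP_HV/HS) combined with data in either a small buffer
 * (IB_MAC_IOCB_RSP_DS) or one or more large buffers (IB_MAC_IOCB_RSP_DL);
 * each combination is handled separately below.
 */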
static struct sk_buff *qlge_build_rx_skb(struct qlge_adapter *qdev,
1670 					 struct rx_ring *rx_ring,
1671 					 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
1672 {
1673 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1674 	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1675 	struct qlge_bq_desc *lbq_desc, *sbq_desc;
1676 	struct sk_buff *skb = NULL;
1677 	size_t hlen = ETH_HLEN;
1678 
1679 	/*
1680 	 * Handle the header buffer if present.
1681 	 */
1682 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1683 	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1684 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1685 			     "Header of %d bytes in small buffer.\n", hdr_len);
1686 		/*
1687 		 * Headers fit nicely into a small buffer.
1688 		 */
1689 		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1690 		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1691 				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1692 		skb = sbq_desc->p.skb;
1693 		qlge_realign_skb(skb, hdr_len);
1694 		skb_put(skb, hdr_len);
1695 		sbq_desc->p.skb = NULL;
1696 	}
1697 
1698 	/*
1699 	 * Handle the data buffer(s).
1700 	 */
1701 	if (unlikely(!length)) {	/* Is there data too? */
1702 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1703 			     "No Data buffer in this packet.\n");
1704 		return skb;
1705 	}
1706 
1707 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1708 		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1709 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1710 				     "Headers in small, data of %d bytes in small, combine them.\n",
1711 				     length);
1712 			/*
1713 			 * Data is less than small buffer size so it's
1714 			 * stuffed in a small buffer.
1715 			 * For this case we append the data
1716 			 * from the "data" small buffer to the "header" small
1717 			 * buffer.
1718 			 */
1719 			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1720 			dma_sync_single_for_cpu(&qdev->pdev->dev,
1721 						sbq_desc->dma_addr,
1722 						SMALL_BUF_MAP_SIZE,
1723 						DMA_FROM_DEVICE);
1724 			skb_put_data(skb, sbq_desc->p.skb->data, length);
1725 		} else {
1726 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1727 				     "%d bytes in a single small buffer.\n",
1728 				     length);
1729 			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1730 			skb = sbq_desc->p.skb;
1731 			qlge_realign_skb(skb, length);
1732 			skb_put(skb, length);
1733 			dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1734 					 SMALL_BUF_MAP_SIZE,
1735 					 DMA_FROM_DEVICE);
1736 			sbq_desc->p.skb = NULL;
1737 		}
1738 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1739 		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1740 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1741 				     "Header in small, %d bytes in large. Chain large to small!\n",
1742 				     length);
1743 			/*
1744 			 * The data is in a single large buffer.  We
1745 			 * chain it to the header buffer's skb and let
1746 			 * it rip.
1747 			 */
1748 			lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1749 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1750 				     "Chaining page at offset = %d, for %d bytes  to skb.\n",
1751 				     lbq_desc->p.pg_chunk.offset, length);
1752 			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1753 					   lbq_desc->p.pg_chunk.offset, length);
1754 			skb->len += length;
1755 			skb->data_len += length;
1756 			skb->truesize += length;
1757 		} else {
1758 			/*
1759 			 * The headers and data are in a single large buffer. We
1760 			 * copy it to a new skb and let it go. This can happen with
1761 			 * jumbo mtu on a non-TCP/UDP frame.
1762 			 */
1763 			lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1764 			skb = netdev_alloc_skb(qdev->ndev, length);
1765 			if (!skb) {
1766 				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1767 					     "No skb available, drop the packet.\n");
1768 				return NULL;
1769 			}
1770 			dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
1771 				       qdev->lbq_buf_size,
1772 				       DMA_FROM_DEVICE);
1773 			skb_reserve(skb, NET_IP_ALIGN);
1774 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1775 				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1776 				     length);
1777 			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1778 					   lbq_desc->p.pg_chunk.offset,
1779 					   length);
1780 			skb->len += length;
1781 			skb->data_len += length;
1782 			skb->truesize += length;
1783 			qlge_update_mac_hdr_len(qdev, ib_mac_rsp,
1784 						lbq_desc->p.pg_chunk.va,
1785 						&hlen);
1786 			__pskb_pull_tail(skb, hlen);
1787 		}
1788 	} else {
1789 		/*
1790 		 * The data is in a chain of large buffers
1791 		 * pointed to by a small buffer.  We loop
1792 		 * through and chain them to our small header
1793 		 * buffer's skb.
1794 		 * frags:  There are 18 max frags and our small
1795 		 *         buffer will hold 32 of them. The thing is,
1796 		 *         we'll use 3 max for our 9000 byte jumbo
1797 		 *         frames.  If the MTU goes up we could
1798 		 *         eventually be in trouble.
1799 		 */
1800 		int size, i = 0;
1801 
1802 		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1803 		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1804 				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1805 		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1806 			/*
1807 			 * This is a non-TCP/UDP IP frame, so
1808 			 * the headers aren't split into a small
1809 			 * buffer.  We have to use the small buffer
1810 			 * that contains our sg list as our skb to
1811 			 * send upstairs. Copy the sg list here to
1812 			 * a local buffer and use it to find the
1813 			 * pages to chain.
1814 			 */
1815 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1816 				     "%d bytes of headers & data in chain of large.\n",
1817 				     length);
1818 			skb = sbq_desc->p.skb;
1819 			sbq_desc->p.skb = NULL;
1820 			skb_reserve(skb, NET_IP_ALIGN);
1821 		}
1822 		do {
1823 			lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1824 			size = min(length, qdev->lbq_buf_size);
1825 
1826 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1827 				     "Adding page %d to skb for %d bytes.\n",
1828 				     i, size);
1829 			skb_fill_page_desc(skb, i,
1830 					   lbq_desc->p.pg_chunk.page,
1831 					   lbq_desc->p.pg_chunk.offset, size);
1832 			skb->len += size;
1833 			skb->data_len += size;
1834 			skb->truesize += size;
1835 			length -= size;
1836 			i++;
1837 		} while (length > 0);
1838 		qlge_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1839 					&hlen);
1840 		__pskb_pull_tail(skb, hlen);
1841 	}
1842 	return skb;
1843 }
1844 
1845 /* Process an inbound completion from an rx ring. */
1846 static void qlge_process_mac_split_rx_intr(struct qlge_adapter *qdev,
1847 					   struct rx_ring *rx_ring,
1848 					   struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1849 					   u16 vlan_id)
1850 {
1851 	struct net_device *ndev = qdev->ndev;
1852 	struct sk_buff *skb = NULL;
1853 
1854 	skb = qlge_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1855 	if (unlikely(!skb)) {
1856 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1857 			     "No skb available, drop packet.\n");
1858 		rx_ring->rx_dropped++;
1859 		return;
1860 	}
1861 
1862 	/* Frame error, so drop the packet. */
1863 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1864 		qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1865 		dev_kfree_skb_any(skb);
1866 		return;
1867 	}
1868 
1869 	/* The max framesize filter on this chip is set higher than
1870 	 * MTU since FCoE uses 2k frames.
1871 	 */
1872 	if (skb->len > ndev->mtu + ETH_HLEN) {
1873 		dev_kfree_skb_any(skb);
1874 		rx_ring->rx_dropped++;
1875 		return;
1876 	}
1877 
1878 	/* loopback self test for ethtool */
1879 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1880 		qlge_check_lb_frame(qdev, skb);
1881 		dev_kfree_skb_any(skb);
1882 		return;
1883 	}
1884 
1885 	prefetch(skb->data);
1886 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1887 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1888 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1889 			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1890 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1891 			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1892 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1893 			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1894 		rx_ring->rx_multicast++;
1895 	}
1896 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1897 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1898 			     "Promiscuous Packet.\n");
1899 	}
1900 
1901 	skb->protocol = eth_type_trans(skb, ndev);
1902 	skb_checksum_none_assert(skb);
1903 
1904 	/* If rx checksum is on, and there are no
1905 	 * csum or frame errors.
1906 	 */
1907 	if ((ndev->features & NETIF_F_RXCSUM) &&
1908 	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1909 		/* TCP frame. */
1910 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1911 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1912 				     "TCP checksum done!\n");
1913 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1914 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1915 			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1916 			/* Unfragmented ipv4 UDP frame. */
1917 			struct iphdr *iph = (struct iphdr *)skb->data;
1918 
1919 			if (!(iph->frag_off &
1920 			      htons(IP_MF | IP_OFFSET))) {
1921 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1922 				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1923 					     "UDP checksum done!\n");
1924 			}
1925 		}
1926 	}
1927 
1928 	rx_ring->rx_packets++;
1929 	rx_ring->rx_bytes += skb->len;
1930 	skb_record_rx_queue(skb, rx_ring->cq_id);
1931 	if (vlan_id != 0xffff)
1932 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1933 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1934 		napi_gro_receive(&rx_ring->napi, skb);
1935 	else
1936 		netif_receive_skb(skb);
1937 }
1938 
1939 /* Process an inbound completion from an rx ring. */
1940 static unsigned long qlge_process_mac_rx_intr(struct qlge_adapter *qdev,
1941 					      struct rx_ring *rx_ring,
1942 					      struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
1943 {
1944 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
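	/* Pull out the VLAN tag only when one is present and VLAN rx offload
	 * is enabled; 0xffff is used as the "no tag" sentinel.
	 */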
1945 	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1946 		       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
1947 		((le16_to_cpu(ib_mac_rsp->vlan_id) &
1948 		  IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1949 
1950 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
1951 		/* The data and headers are split into
1952 		 * separate buffers.
1953 		 */
1954 		qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1955 					       vlan_id);
1956 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1957 		/* The data fit in a single small buffer.
1958 		 * Allocate a new skb, copy the data and
1959 		 * return the buffer to the free pool.
1960 		 */
1961 		qlge_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
1962 					vlan_id);
1963 	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
1964 		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
1965 		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
1966 		/* TCP packet in a page chunk that's been checksummed.
1967 		 * Tack it on to our GRO skb and let it go.
1968 		 */
1969 		qlge_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
1970 					     vlan_id);
1971 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1972 		/* Non-TCP packet in a page chunk. Allocate an
1973 		 * skb, tack it on frags, and send it up.
1974 		 */
1975 		qlge_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
1976 					 vlan_id);
1977 	} else {
1978 		/* Non-TCP/UDP large frames that span multiple buffers
1979 		 * can be processed correctly by the split frame logic.
1980 		 */
1981 		qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1982 					       vlan_id);
1983 	}
1984 
1985 	return (unsigned long)length;
1986 }
1987 
1988 /* Process an outbound completion from an rx ring. */
1989 static void qlge_process_mac_tx_intr(struct qlge_adapter *qdev,
1990 				     struct qlge_ob_mac_iocb_rsp *mac_rsp)
1991 {
1992 	struct tx_ring *tx_ring;
1993 	struct tx_ring_desc *tx_ring_desc;
1994 
1995 	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1996 	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1997 	qlge_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1998 	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
1999 	tx_ring->tx_packets++;
2000 	dev_kfree_skb(tx_ring_desc->skb);
2001 	tx_ring_desc->skb = NULL;
2002 
2003 	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2004 					OB_MAC_IOCB_RSP_S |
2005 					OB_MAC_IOCB_RSP_L |
2006 					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2007 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2008 			netif_warn(qdev, tx_done, qdev->ndev,
2009 				   "Total descriptor length did not match transfer length.\n");
2010 		}
2011 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2012 			netif_warn(qdev, tx_done, qdev->ndev,
2013 				   "Frame too short to be valid, not sent.\n");
2014 		}
2015 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2016 			netif_warn(qdev, tx_done, qdev->ndev,
2017 				   "Frame too long, but sent anyway.\n");
2018 		}
2019 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2020 			netif_warn(qdev, tx_done, qdev->ndev,
2021 				   "PCI backplane error. Frame not sent.\n");
2022 		}
2023 	}
2024 	atomic_inc(&tx_ring->tx_count);
2025 }
2026 
2027 /* Fire up a handler to reset the MPI processor. */
2028 void qlge_queue_fw_error(struct qlge_adapter *qdev)
2029 {
2030 	qlge_link_off(qdev);
2031 	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2032 }
2033 
2034 void qlge_queue_asic_error(struct qlge_adapter *qdev)
2035 {
2036 	qlge_link_off(qdev);
2037 	qlge_disable_interrupts(qdev);
2038 	/* Clear adapter up bit to signal the recovery
2039 	 * process that it shouldn't kill the reset worker
2040 	 * thread
2041 	 */
2042 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
2043 	/* Set the asic recovery bit to tell the reset process that we are
2044 	 * in fatal error recovery rather than a normal close.
2045 	 */
2046 	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2047 	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2048 }
2049 
2050 static void qlge_process_chip_ae_intr(struct qlge_adapter *qdev,
2051 				      struct qlge_ib_ae_iocb_rsp *ib_ae_rsp)
2052 {
2053 	switch (ib_ae_rsp->event) {
2054 	case MGMT_ERR_EVENT:
2055 		netif_err(qdev, rx_err, qdev->ndev,
2056 			  "Management Processor Fatal Error.\n");
2057 		qlge_queue_fw_error(qdev);
2058 		return;
2059 
2060 	case CAM_LOOKUP_ERR_EVENT:
2061 		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2062 		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2063 		qlge_queue_asic_error(qdev);
2064 		return;
2065 
2066 	case SOFT_ECC_ERROR_EVENT:
2067 		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2068 		qlge_queue_asic_error(qdev);
2069 		break;
2070 
2071 	case PCI_ERR_ANON_BUF_RD:
2072 		netdev_err(qdev->ndev,
2073 			   "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2074 			   ib_ae_rsp->q_id);
2075 		qlge_queue_asic_error(qdev);
2076 		break;
2077 
2078 	default:
2079 		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2080 			  ib_ae_rsp->event);
2081 		qlge_queue_asic_error(qdev);
2082 		break;
2083 	}
2084 }
2085 
2086 static int qlge_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2087 {
2088 	struct qlge_adapter *qdev = rx_ring->qdev;
2089 	u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2090 	struct qlge_ob_mac_iocb_rsp *net_rsp = NULL;
2091 	int count = 0;
2092 
2093 	struct tx_ring *tx_ring;
2094 	/* While there are entries in the completion queue. */
2095 	while (prod != rx_ring->cnsmr_idx) {
2096 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2097 			     "cq_id = %d, prod = %d, cnsmr = %d\n",
2098 			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2099 
2100 		net_rsp = (struct qlge_ob_mac_iocb_rsp *)rx_ring->curr_entry;
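		/* Ensure the completion entry is not read before the producer
		 * index update that made it visible.
		 */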
2101 		rmb();
2102 		switch (net_rsp->opcode) {
2103 		case OPCODE_OB_MAC_TSO_IOCB:
2104 		case OPCODE_OB_MAC_IOCB:
2105 			qlge_process_mac_tx_intr(qdev, net_rsp);
2106 			break;
2107 		default:
2108 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2109 				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2110 				     net_rsp->opcode);
2111 		}
2112 		count++;
2113 		qlge_update_cq(rx_ring);
2114 		prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2115 	}
2116 	if (!net_rsp)
2117 		return 0;
2118 	qlge_write_cq_idx(rx_ring);
2119 	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2120 	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2121 		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2122 			/*
2123 			 * The queue got stopped because the tx_ring was full.
2124 			 * Wake it up, because it's now at least 25% empty.
2125 			 */
2126 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2127 	}
2128 
2129 	return count;
2130 }
2131 
2132 static int qlge_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2133 {
2134 	struct qlge_adapter *qdev = rx_ring->qdev;
2135 	u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2136 	struct qlge_net_rsp_iocb *net_rsp;
2137 	int count = 0;
2138 
2139 	/* While there are entries in the completion queue. */
2140 	while (prod != rx_ring->cnsmr_idx) {
2141 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2142 			     "cq_id = %d, prod = %d, cnsmr = %d\n",
2143 			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2144 
2145 		net_rsp = rx_ring->curr_entry;
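		/* Don't read the completion entry contents until after the
		 * producer index check above.
		 */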
2146 		rmb();
2147 		switch (net_rsp->opcode) {
2148 		case OPCODE_IB_MAC_IOCB:
2149 			qlge_process_mac_rx_intr(qdev, rx_ring,
2150 						 (struct qlge_ib_mac_iocb_rsp *)
2151 						 net_rsp);
2152 			break;
2153 
2154 		case OPCODE_IB_AE_IOCB:
2155 			qlge_process_chip_ae_intr(qdev, (struct qlge_ib_ae_iocb_rsp *)
2156 						  net_rsp);
2157 			break;
2158 		default:
2159 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2160 				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2161 				     net_rsp->opcode);
2162 			break;
2163 		}
2164 		count++;
2165 		qlge_update_cq(rx_ring);
2166 		prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2167 		if (count == budget)
2168 			break;
2169 	}
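	/* Replenish the small/large rx buffer queues for the buffers consumed above. */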
2170 	qlge_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
2171 	qlge_write_cq_idx(rx_ring);
2172 	return count;
2173 }
2174 
2175 static int qlge_napi_poll_msix(struct napi_struct *napi, int budget)
2176 {
2177 	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2178 	struct qlge_adapter *qdev = rx_ring->qdev;
2179 	struct rx_ring *trx_ring;
2180 	int i, work_done = 0;
2181 	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2182 
2183 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2184 		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2185 
2186 	/* Service the TX rings first.  They start
2187 	 * right after the RSS rings.
2188 	 */
2189 	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2190 		trx_ring = &qdev->rx_ring[i];
2191 		/* If this TX completion ring belongs to this vector and
2192 		 * it's not empty then service it.
2193 		 */
2194 		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2195 		    (qlge_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2196 		     trx_ring->cnsmr_idx)) {
2197 			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2198 				     "%s: Servicing TX completion ring %d.\n",
2199 				     __func__, trx_ring->cq_id);
2200 			qlge_clean_outbound_rx_ring(trx_ring);
2201 		}
2202 	}
2203 
2204 	/*
2205 	 * Now service the RSS ring if it's active.
2206 	 */
2207 	if (qlge_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2208 	    rx_ring->cnsmr_idx) {
2209 		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2210 			     "%s: Servicing RX completion ring %d.\n",
2211 			     __func__, rx_ring->cq_id);
2212 		work_done = qlge_clean_inbound_rx_ring(rx_ring, budget);
2213 	}
2214 
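	/* Per NAPI convention, only complete the poll and re-arm the
	 * interrupt when the budget was not exhausted.
	 */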
2215 	if (work_done < budget) {
2216 		napi_complete_done(napi, work_done);
2217 		qlge_enable_completion_interrupt(qdev, rx_ring->irq);
2218 	}
2219 	return work_done;
2220 }
2221 
2222 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2223 {
2224 	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2225 
2226 	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2227 		qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2228 			     NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2229 	} else {
2230 		qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2231 	}
2232 }
2233 
2234 /*
2235  * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2236  * based on the features to enable/disable hardware vlan accel
2237  */
2238 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2239 					netdev_features_t features)
2240 {
2241 	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2242 	bool need_restart = netif_running(ndev);
2243 	int status = 0;
2244 
2245 	if (need_restart) {
2246 		status = qlge_adapter_down(qdev);
2247 		if (status) {
2248 			netif_err(qdev, link, qdev->ndev,
2249 				  "Failed to bring down the adapter\n");
2250 			return status;
2251 		}
2252 	}
2253 
2254 	/* Update the features with the recent change. */
2255 	ndev->features = features;
2256 
2257 	if (need_restart) {
2258 		status = qlge_adapter_up(qdev);
2259 		if (status) {
2260 			netif_err(qdev, link, qdev->ndev,
2261 				  "Failed to bring up the adapter\n");
2262 			return status;
2263 		}
2264 	}
2265 
2266 	return status;
2267 }
2268 
2269 static int qlge_set_features(struct net_device *ndev,
2270 			     netdev_features_t features)
2271 {
2272 	netdev_features_t changed = ndev->features ^ features;
2273 	int err;
2274 
2275 	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2276 		/* Update the behavior of vlan accel in the adapter */
2277 		err = qlge_update_hw_vlan_features(ndev, features);
2278 		if (err)
2279 			return err;
2280 
2281 		qlge_vlan_mode(ndev, features);
2282 	}
2283 
2284 	return 0;
2285 }
2286 
2287 static int __qlge_vlan_rx_add_vid(struct qlge_adapter *qdev, u16 vid)
2288 {
2289 	u32 enable_bit = MAC_ADDR_E;
2290 	int err;
2291 
2292 	err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2293 				    MAC_ADDR_TYPE_VLAN, vid);
2294 	if (err)
2295 		netif_err(qdev, ifup, qdev->ndev,
2296 			  "Failed to init vlan address.\n");
2297 	return err;
2298 }
2299 
2300 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2301 {
2302 	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2303 	int status;
2304 	int err;
2305 
2306 	status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2307 	if (status)
2308 		return status;
2309 
2310 	err = __qlge_vlan_rx_add_vid(qdev, vid);
2311 	set_bit(vid, qdev->active_vlans);
2312 
2313 	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2314 
2315 	return err;
2316 }
2317 
2318 static int __qlge_vlan_rx_kill_vid(struct qlge_adapter *qdev, u16 vid)
2319 {
2320 	u32 enable_bit = 0;
2321 	int err;
2322 
2323 	err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2324 				    MAC_ADDR_TYPE_VLAN, vid);
2325 	if (err)
2326 		netif_err(qdev, ifup, qdev->ndev,
2327 			  "Failed to clear vlan address.\n");
2328 	return err;
2329 }
2330 
2331 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2332 {
2333 	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2334 	int status;
2335 	int err;
2336 
2337 	status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2338 	if (status)
2339 		return status;
2340 
2341 	err = __qlge_vlan_rx_kill_vid(qdev, vid);
2342 	clear_bit(vid, qdev->active_vlans);
2343 
2344 	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2345 
2346 	return err;
2347 }
2348 
2349 static void qlge_restore_vlan(struct qlge_adapter *qdev)
2350 {
2351 	int status;
2352 	u16 vid;
2353 
2354 	status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2355 	if (status)
2356 		return;
2357 
2358 	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2359 		__qlge_vlan_rx_add_vid(qdev, vid);
2360 
2361 	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2362 }
2363 
2364 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2365 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2366 {
2367 	struct rx_ring *rx_ring = dev_id;
2368 
2369 	napi_schedule(&rx_ring->napi);
2370 	return IRQ_HANDLED;
2371 }
2372 
2373 /* This handles a fatal error, MPI activity, and the default
2374  * rx_ring in an MSI-X multiple vector environment.
2375  * In an MSI/Legacy environment it also processes the rest of
2376  * the rx_rings.
2377  */
2378 static irqreturn_t qlge_isr(int irq, void *dev_id)
2379 {
2380 	struct rx_ring *rx_ring = dev_id;
2381 	struct qlge_adapter *qdev = rx_ring->qdev;
2382 	struct intr_context *intr_context = &qdev->intr_context[0];
2383 	u32 var;
2384 	int work_done = 0;
2385 
2386 	/* Experience shows that when using INTx interrupts, interrupts must
2387 	 * be masked manually.
2388 	 * When using MSI mode, INTR_EN_EN must be explicitly disabled
2389 	 * (even though it is auto-masked), otherwise a later command to
2390 	 * enable it is not effective.
2391 	 */
2392 	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2393 		qlge_disable_completion_interrupt(qdev, 0);
2394 
2395 	var = qlge_read32(qdev, STS);
2396 
2397 	/*
2398 	 * Check for fatal error.
2399 	 */
2400 	if (var & STS_FE) {
2401 		qlge_disable_completion_interrupt(qdev, 0);
2402 		qlge_queue_asic_error(qdev);
2403 		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2404 		var = qlge_read32(qdev, ERR_STS);
2405 		netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n", var);
2406 		return IRQ_HANDLED;
2407 	}
2408 
2409 	/*
2410 	 * Check MPI processor activity.
2411 	 */
2412 	if ((var & STS_PI) &&
2413 	    (qlge_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2414 		/*
2415 		 * We've got an async event or mailbox completion.
2416 		 * Handle it and clear the source of the interrupt.
2417 		 */
2418 		netif_err(qdev, intr, qdev->ndev,
2419 			  "Got MPI processor interrupt.\n");
2420 		qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2421 		queue_delayed_work_on(smp_processor_id(),
2422 				      qdev->workqueue, &qdev->mpi_work, 0);
2423 		work_done++;
2424 	}
2425 
2426 	/*
2427 	 * Get the bit-mask that shows the active queues for this
2428 	 * pass.  Compare it to the queues that this irq services
2429 	 * and call napi if there's a match.
2430 	 */
2431 	var = qlge_read32(qdev, ISR1);
2432 	if (var & intr_context->irq_mask) {
2433 		netif_info(qdev, intr, qdev->ndev,
2434 			   "Waking handler for rx_ring[0].\n");
2435 		napi_schedule(&rx_ring->napi);
2436 		work_done++;
2437 	} else {
2438 		/* Experience shows that the device sometimes signals an
2439 		 * interrupt but no work is scheduled from this function.
2440 		 * Nevertheless, the interrupt is auto-masked. Therefore, we
2441 		 * systematically re-enable the interrupt if we didn't
2442 		 * schedule napi.
2443 		 */
2444 		qlge_enable_completion_interrupt(qdev, 0);
2445 	}
2446 
2447 	return work_done ? IRQ_HANDLED : IRQ_NONE;
2448 }
2449 
2450 static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
2451 {
2452 	if (skb_is_gso(skb)) {
2453 		int err;
2454 		__be16 l3_proto = vlan_get_protocol(skb);
2455 
2456 		err = skb_cow_head(skb, 0);
2457 		if (err < 0)
2458 			return err;
2459 
2460 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2461 		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2462 		mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2463 		mac_iocb_ptr->total_hdrs_len =
2464 			cpu_to_le16(skb_tcp_all_headers(skb));
2465 		mac_iocb_ptr->net_trans_offset =
2466 			cpu_to_le16(skb_network_offset(skb) |
2467 				    skb_transport_offset(skb)
2468 				    << OB_MAC_TRANSPORT_HDR_SHIFT);
2469 		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2470 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
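		/* Seed the TCP pseudo-header checksum so the hardware can
		 * finish the checksum for each LSO segment.
		 */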
2471 		if (likely(l3_proto == htons(ETH_P_IP))) {
2472 			struct iphdr *iph = ip_hdr(skb);
2473 
2474 			iph->check = 0;
2475 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2476 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2477 								 iph->daddr, 0,
2478 								 IPPROTO_TCP,
2479 								 0);
2480 		} else if (l3_proto == htons(ETH_P_IPV6)) {
2481 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2482 			tcp_hdr(skb)->check =
2483 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2484 						 &ipv6_hdr(skb)->daddr,
2485 						 0, IPPROTO_TCP, 0);
2486 		}
2487 		return 1;
2488 	}
2489 	return 0;
2490 }
2491 
2492 static void qlge_hw_csum_setup(struct sk_buff *skb,
2493 			       struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
2494 {
2495 	int len;
2496 	struct iphdr *iph = ip_hdr(skb);
2497 	__sum16 *check;
2498 
2499 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2500 	mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2501 	mac_iocb_ptr->net_trans_offset =
2502 		cpu_to_le16(skb_network_offset(skb) |
2503 			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2504 
2505 	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2506 	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2507 	if (likely(iph->protocol == IPPROTO_TCP)) {
2508 		check = &(tcp_hdr(skb)->check);
2509 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2510 		mac_iocb_ptr->total_hdrs_len =
2511 			cpu_to_le16(skb_transport_offset(skb) +
2512 				    (tcp_hdr(skb)->doff << 2));
2513 	} else {
2514 		check = &(udp_hdr(skb)->check);
2515 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2516 		mac_iocb_ptr->total_hdrs_len =
2517 			cpu_to_le16(skb_transport_offset(skb) +
2518 				    sizeof(struct udphdr));
2519 	}
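	/* Seed the pseudo-header checksum; the hardware computes the rest
	 * over the payload.
	 */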
2520 	*check = ~csum_tcpudp_magic(iph->saddr,
2521 				    iph->daddr, len, iph->protocol, 0);
2522 }
2523 
2524 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2525 {
2526 	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2527 	struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
2528 	struct tx_ring_desc *tx_ring_desc;
2529 	int tso;
2530 	struct tx_ring *tx_ring;
2531 	u32 tx_ring_idx = (u32)skb->queue_mapping;
2532 
2533 	tx_ring = &qdev->tx_ring[tx_ring_idx];
2534 
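	/* Pad runt frames up to the minimum Ethernet frame size. */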
2535 	if (skb_padto(skb, ETH_ZLEN))
2536 		return NETDEV_TX_OK;
2537 
2538 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2539 		netif_info(qdev, tx_queued, qdev->ndev,
2540 			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2541 			   __func__, tx_ring_idx);
2542 		netif_stop_subqueue(ndev, tx_ring->wq_id);
2543 		tx_ring->tx_errors++;
2544 		return NETDEV_TX_BUSY;
2545 	}
2546 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2547 	mac_iocb_ptr = tx_ring_desc->queue_entry;
2548 	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2549 
2550 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2551 	mac_iocb_ptr->tid = tx_ring_desc->index;
2552 	/* We use the upper 32-bits to store the tx queue for this IO.
2553 	 * When we get the completion we can use it to establish the context.
2554 	 */
2555 	mac_iocb_ptr->txq_idx = tx_ring_idx;
2556 	tx_ring_desc->skb = skb;
2557 
2558 	mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);
2559 
2560 	if (skb_vlan_tag_present(skb)) {
2561 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2562 			     "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2563 		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2564 		mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2565 	}
2566 	tso = qlge_tso(skb, (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
2567 	if (tso < 0) {
2568 		dev_kfree_skb_any(skb);
2569 		return NETDEV_TX_OK;
2570 	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2571 		qlge_hw_csum_setup(skb,
2572 				   (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
2573 	}
2574 	if (qlge_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2575 	    NETDEV_TX_OK) {
2576 		netif_err(qdev, tx_queued, qdev->ndev,
2577 			  "Could not map the segments.\n");
2578 		tx_ring->tx_errors++;
2579 		return NETDEV_TX_BUSY;
2580 	}
2581 
2582 	tx_ring->prod_idx++;
2583 	if (tx_ring->prod_idx == tx_ring->wq_len)
2584 		tx_ring->prod_idx = 0;
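	/* Make sure the IOCB is written to memory before the doorbell below
	 * hands it to the hardware.
	 */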
2585 	wmb();
2586 
2587 	qlge_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2588 	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2589 		     "tx queued, slot %d, len %d\n",
2590 		     tx_ring->prod_idx, skb->len);
2591 
2592 	atomic_dec(&tx_ring->tx_count);
2593 
2594 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2595 		netif_stop_subqueue(ndev, tx_ring->wq_id);
2596 		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2597 			/*
2598 			 * The queue got stopped because the tx_ring was full.
2599 			 * Wake it up, because it's now at least 25% empty.
2600 			 */
2601 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2602 	}
2603 	return NETDEV_TX_OK;
2604 }
2605 
2606 static void qlge_free_shadow_space(struct qlge_adapter *qdev)
2607 {
2608 	if (qdev->rx_ring_shadow_reg_area) {
2609 		dma_free_coherent(&qdev->pdev->dev,
2610 				  PAGE_SIZE,
2611 				  qdev->rx_ring_shadow_reg_area,
2612 				  qdev->rx_ring_shadow_reg_dma);
2613 		qdev->rx_ring_shadow_reg_area = NULL;
2614 	}
2615 	if (qdev->tx_ring_shadow_reg_area) {
2616 		dma_free_coherent(&qdev->pdev->dev,
2617 				  PAGE_SIZE,
2618 				  qdev->tx_ring_shadow_reg_area,
2619 				  qdev->tx_ring_shadow_reg_dma);
2620 		qdev->tx_ring_shadow_reg_area = NULL;
2621 	}
2622 }
2623 
2624 static int qlge_alloc_shadow_space(struct qlge_adapter *qdev)
2625 {
2626 	qdev->rx_ring_shadow_reg_area =
2627 		dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2628 				   &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
2629 	if (!qdev->rx_ring_shadow_reg_area) {
2630 		netif_err(qdev, ifup, qdev->ndev,
2631 			  "Allocation of RX shadow space failed.\n");
2632 		return -ENOMEM;
2633 	}
2634 
2635 	qdev->tx_ring_shadow_reg_area =
2636 		dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2637 				   &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
2638 	if (!qdev->tx_ring_shadow_reg_area) {
2639 		netif_err(qdev, ifup, qdev->ndev,
2640 			  "Allocation of TX shadow space failed.\n");
2641 		goto err_wqp_sh_area;
2642 	}
2643 	return 0;
2644 
2645 err_wqp_sh_area:
2646 	dma_free_coherent(&qdev->pdev->dev,
2647 			  PAGE_SIZE,
2648 			  qdev->rx_ring_shadow_reg_area,
2649 			  qdev->rx_ring_shadow_reg_dma);
2650 	return -ENOMEM;
2651 }
2652 
2653 static void qlge_init_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
2654 {
2655 	struct tx_ring_desc *tx_ring_desc;
2656 	int i;
2657 	struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
2658 
2659 	mac_iocb_ptr = tx_ring->wq_base;
2660 	tx_ring_desc = tx_ring->q;
2661 	for (i = 0; i < tx_ring->wq_len; i++) {
2662 		tx_ring_desc->index = i;
2663 		tx_ring_desc->skb = NULL;
2664 		tx_ring_desc->queue_entry = mac_iocb_ptr;
2665 		mac_iocb_ptr++;
2666 		tx_ring_desc++;
2667 	}
2668 	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2669 }
2670 
2671 static void qlge_free_tx_resources(struct qlge_adapter *qdev,
2672 				   struct tx_ring *tx_ring)
2673 {
2674 	if (tx_ring->wq_base) {
2675 		dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2676 				  tx_ring->wq_base, tx_ring->wq_base_dma);
2677 		tx_ring->wq_base = NULL;
2678 	}
2679 	kfree(tx_ring->q);
2680 	tx_ring->q = NULL;
2681 }
2682 
2683 static int qlge_alloc_tx_resources(struct qlge_adapter *qdev,
2684 				   struct tx_ring *tx_ring)
2685 {
2686 	tx_ring->wq_base =
2687 		dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2688 				   &tx_ring->wq_base_dma, GFP_ATOMIC);
2689 
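	/* Reject an unaligned work queue base; WQ_ADDR_ALIGN is the alignment
	 * mask checked against here.
	 */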
2690 	if (!tx_ring->wq_base ||
2691 	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2692 		goto pci_alloc_err;
2693 
2694 	tx_ring->q =
2695 		kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2696 			      GFP_KERNEL);
2697 	if (!tx_ring->q)
2698 		goto err;
2699 
2700 	return 0;
2701 err:
2702 	dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2703 			  tx_ring->wq_base, tx_ring->wq_base_dma);
2704 	tx_ring->wq_base = NULL;
2705 pci_alloc_err:
2706 	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2707 	return -ENOMEM;
2708 }
2709 
2710 static void qlge_free_lbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2711 {
2712 	struct qlge_bq *lbq = &rx_ring->lbq;
2713 	unsigned int last_offset;
2714 
2715 	last_offset = qlge_lbq_block_size(qdev) - qdev->lbq_buf_size;
2716 	while (lbq->next_to_clean != lbq->next_to_use) {
2717 		struct qlge_bq_desc *lbq_desc =
2718 			&lbq->queue[lbq->next_to_clean];
2719 
2720 		if (lbq_desc->p.pg_chunk.offset == last_offset)
2721 			dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
2722 				       qlge_lbq_block_size(qdev),
2723 				       DMA_FROM_DEVICE);
2724 		put_page(lbq_desc->p.pg_chunk.page);
2725 
2726 		lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
2727 	}
2728 
2729 	if (rx_ring->master_chunk.page) {
2730 		dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
2731 			       qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
2732 		put_page(rx_ring->master_chunk.page);
2733 		rx_ring->master_chunk.page = NULL;
2734 	}
2735 }
2736 
2737 static void qlge_free_sbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2738 {
2739 	int i;
2740 
2741 	for (i = 0; i < QLGE_BQ_LEN; i++) {
2742 		struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
2743 
2744 		if (!sbq_desc) {
2745 			netif_err(qdev, ifup, qdev->ndev,
2746 				  "sbq_desc %d is NULL.\n", i);
2747 			return;
2748 		}
2749 		if (sbq_desc->p.skb) {
2750 			dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
2751 					 SMALL_BUF_MAP_SIZE,
2752 					 DMA_FROM_DEVICE);
2753 			dev_kfree_skb(sbq_desc->p.skb);
2754 			sbq_desc->p.skb = NULL;
2755 		}
2756 	}
2757 }
2758 
2759 /* Free all large and small rx buffers associated
2760  * with the completion queues for this device.
2761  */
2762 static void qlge_free_rx_buffers(struct qlge_adapter *qdev)
2763 {
2764 	int i;
2765 
2766 	for (i = 0; i < qdev->rx_ring_count; i++) {
2767 		struct rx_ring *rx_ring = &qdev->rx_ring[i];
2768 
2769 		if (rx_ring->lbq.queue)
2770 			qlge_free_lbq_buffers(qdev, rx_ring);
2771 		if (rx_ring->sbq.queue)
2772 			qlge_free_sbq_buffers(qdev, rx_ring);
2773 	}
2774 }
2775 
2776 static void qlge_alloc_rx_buffers(struct qlge_adapter *qdev)
2777 {
2778 	int i;
2779 
2780 	for (i = 0; i < qdev->rss_ring_count; i++)
2781 		qlge_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
2782 					  HZ / 2);
2783 }
2784 
2785 static int qlge_init_bq(struct qlge_bq *bq)
2786 {
2787 	struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
2788 	struct qlge_adapter *qdev = rx_ring->qdev;
2789 	struct qlge_bq_desc *bq_desc;
2790 	__le64 *buf_ptr;
2791 	int i;
2792 
2793 	bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2794 				      &bq->base_dma, GFP_ATOMIC);
2795 	if (!bq->base)
2796 		return -ENOMEM;
2797 
2798 	bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
2799 				  GFP_KERNEL);
2800 	if (!bq->queue)
2801 		return -ENOMEM;
2802 
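	/* Point each descriptor at its slot in the DMA-able array of buffer
	 * addresses that the hardware consumes.
	 */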
2803 	buf_ptr = bq->base;
2804 	bq_desc = &bq->queue[0];
2805 	for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
2806 		bq_desc->p.skb = NULL;
2807 		bq_desc->index = i;
2808 		bq_desc->buf_ptr = buf_ptr;
2809 	}
2810 
2811 	return 0;
2812 }
2813 
2814 static void qlge_free_rx_resources(struct qlge_adapter *qdev,
2815 				   struct rx_ring *rx_ring)
2816 {
2817 	/* Free the small buffer queue. */
2818 	if (rx_ring->sbq.base) {
2819 		dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2820 				  rx_ring->sbq.base, rx_ring->sbq.base_dma);
2821 		rx_ring->sbq.base = NULL;
2822 	}
2823 
2824 	/* Free the small buffer queue control blocks. */
2825 	kfree(rx_ring->sbq.queue);
2826 	rx_ring->sbq.queue = NULL;
2827 
2828 	/* Free the large buffer queue. */
2829 	if (rx_ring->lbq.base) {
2830 		dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2831 				  rx_ring->lbq.base, rx_ring->lbq.base_dma);
2832 		rx_ring->lbq.base = NULL;
2833 	}
2834 
2835 	/* Free the large buffer queue control blocks. */
2836 	kfree(rx_ring->lbq.queue);
2837 	rx_ring->lbq.queue = NULL;
2838 
2839 	/* Free the rx queue. */
2840 	if (rx_ring->cq_base) {
2841 		dma_free_coherent(&qdev->pdev->dev,
2842 				  rx_ring->cq_size,
2843 				  rx_ring->cq_base, rx_ring->cq_base_dma);
2844 		rx_ring->cq_base = NULL;
2845 	}
2846 }
2847 
2848 /* Allocate queues and buffers for this completions queue based
2849  * on the values in the parameter structure.
2850  */
2851 static int qlge_alloc_rx_resources(struct qlge_adapter *qdev,
2852 				   struct rx_ring *rx_ring)
2853 {
2854 	/*
2855 	 * Allocate the completion queue for this rx_ring.
2856 	 */
2857 	rx_ring->cq_base =
2858 		dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
2859 				   &rx_ring->cq_base_dma, GFP_ATOMIC);
2860 
2861 	if (!rx_ring->cq_base) {
2862 		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2863 		return -ENOMEM;
2864 	}
2865 
2866 	if (rx_ring->cq_id < qdev->rss_ring_count &&
2867 	    (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
2868 		qlge_free_rx_resources(qdev, rx_ring);
2869 		return -ENOMEM;
2870 	}
2871 
2872 	return 0;
2873 }
2874 
2875 static void qlge_tx_ring_clean(struct qlge_adapter *qdev)
2876 {
2877 	struct tx_ring *tx_ring;
2878 	struct tx_ring_desc *tx_ring_desc;
2879 	int i, j;
2880 
2881 	/*
2882 	 * Loop through all queues and free
2883 	 * any resources.
2884 	 */
2885 	for (j = 0; j < qdev->tx_ring_count; j++) {
2886 		tx_ring = &qdev->tx_ring[j];
2887 		for (i = 0; i < tx_ring->wq_len; i++) {
2888 			tx_ring_desc = &tx_ring->q[i];
2889 			if (tx_ring_desc && tx_ring_desc->skb) {
2890 				netif_err(qdev, ifdown, qdev->ndev,
2891 					  "Freeing lost SKB %p, from queue %d, index %d.\n",
2892 					  tx_ring_desc->skb, j,
2893 					  tx_ring_desc->index);
2894 				qlge_unmap_send(qdev, tx_ring_desc,
2895 						tx_ring_desc->map_cnt);
2896 				dev_kfree_skb(tx_ring_desc->skb);
2897 				tx_ring_desc->skb = NULL;
2898 			}
2899 		}
2900 	}
2901 }
2902 
2903 static void qlge_free_mem_resources(struct qlge_adapter *qdev)
2904 {
2905 	int i;
2906 
2907 	for (i = 0; i < qdev->tx_ring_count; i++)
2908 		qlge_free_tx_resources(qdev, &qdev->tx_ring[i]);
2909 	for (i = 0; i < qdev->rx_ring_count; i++)
2910 		qlge_free_rx_resources(qdev, &qdev->rx_ring[i]);
2911 	qlge_free_shadow_space(qdev);
2912 }
2913 
2914 static int qlge_alloc_mem_resources(struct qlge_adapter *qdev)
2915 {
2916 	int i;
2917 
2918 	/* Allocate space for our shadow registers and such. */
2919 	if (qlge_alloc_shadow_space(qdev))
2920 		return -ENOMEM;
2921 
2922 	for (i = 0; i < qdev->rx_ring_count; i++) {
2923 		if (qlge_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2924 			netif_err(qdev, ifup, qdev->ndev,
2925 				  "RX resource allocation failed.\n");
2926 			goto err_mem;
2927 		}
2928 	}
2929 	/* Allocate tx queue resources */
2930 	for (i = 0; i < qdev->tx_ring_count; i++) {
2931 		if (qlge_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2932 			netif_err(qdev, ifup, qdev->ndev,
2933 				  "TX resource allocation failed.\n");
2934 			goto err_mem;
2935 		}
2936 	}
2937 	return 0;
2938 
2939 err_mem:
2940 	qlge_free_mem_resources(qdev);
2941 	return -ENOMEM;
2942 }
2943 
2944 /* Set up the rx ring control block and pass it to the chip.
2945  * The control block is defined as
2946  * "Completion Queue Initialization Control Block", or cqicb.
2947  */
2948 static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2949 {
2950 	struct cqicb *cqicb = &rx_ring->cqicb;
2951 	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2952 		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2953 	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2954 		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2955 	void __iomem *doorbell_area =
2956 		qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2957 	int err = 0;
2958 	u64 dma;
2959 	__le64 *base_indirect_ptr;
2960 	int page_entries;
2961 
2962 	/* Set up the shadow registers for this ring. */
2963 	rx_ring->prod_idx_sh_reg = shadow_reg;
2964 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2965 	*rx_ring->prod_idx_sh_reg = 0;
2966 	shadow_reg += sizeof(u64);
2967 	shadow_reg_dma += sizeof(u64);
2968 	rx_ring->lbq.base_indirect = shadow_reg;
2969 	rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
2970 	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2971 	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2972 	rx_ring->sbq.base_indirect = shadow_reg;
2973 	rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
2974 
2975 	/* PCI doorbell mem area + 0x00 for consumer index register */
2976 	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
2977 	rx_ring->cnsmr_idx = 0;
2978 	rx_ring->curr_entry = rx_ring->cq_base;
2979 
2980 	/* PCI doorbell mem area + 0x04 for valid register */
2981 	rx_ring->valid_db_reg = doorbell_area + 0x04;
2982 
2983 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
2984 	rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
2985 
2986 	/* PCI doorbell mem area + 0x1c */
2987 	rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
2988 
2989 	memset((void *)cqicb, 0, sizeof(struct cqicb));
2990 	cqicb->msix_vect = rx_ring->irq;
2991 
2992 	cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
2993 				 LEN_CPP_CONT);
2994 
2995 	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
2996 
2997 	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
2998 
2999 	/*
3000 	 * Set up the control block load flags.
3001 	 */
3002 	cqicb->flags = FLAGS_LC |	/* Load queue base address */
3003 		FLAGS_LV |		/* Load MSI-X vector */
3004 		FLAGS_LI;		/* Load irq delay values */
3005 	if (rx_ring->cq_id < qdev->rss_ring_count) {
3006 		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
3007 		dma = (u64)rx_ring->lbq.base_dma;
3008 		base_indirect_ptr = rx_ring->lbq.base_indirect;
3009 
3010 		for (page_entries = 0;
3011 		     page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN);
3012 		     page_entries++) {
3013 			base_indirect_ptr[page_entries] = cpu_to_le64(dma);
3014 			dma += DB_PAGE_SIZE;
3015 		}
3016 		cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
3017 		cqicb->lbq_buf_size =
3018 			cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
3019 		cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3020 		rx_ring->lbq.next_to_use = 0;
3021 		rx_ring->lbq.next_to_clean = 0;
3022 
3023 		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
3024 		dma = (u64)rx_ring->sbq.base_dma;
3025 		base_indirect_ptr = rx_ring->sbq.base_indirect;
3026 
3027 		for (page_entries = 0;
3028 		     page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN);
3029 		     page_entries++) {
3030 			base_indirect_ptr[page_entries] = cpu_to_le64(dma);
3031 			dma += DB_PAGE_SIZE;
3032 		}
3033 		cqicb->sbq_addr =
3034 			cpu_to_le64(rx_ring->sbq.base_indirect_dma);
3035 		cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
3036 		cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3037 		rx_ring->sbq.next_to_use = 0;
3038 		rx_ring->sbq.next_to_clean = 0;
3039 	}
3040 	if (rx_ring->cq_id < qdev->rss_ring_count) {
3041 		/* Inbound completion handling rx_rings run in
3042 		 * separate NAPI contexts.
3043 		 */
3044 		netif_napi_add(qdev->ndev, &rx_ring->napi,
3045 			       qlge_napi_poll_msix);
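		/* Interrupt coalescing: delay in usecs and max frames per interrupt. */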
3046 		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3047 		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3048 	} else {
3049 		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3050 		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3051 	}
3052 	err = qlge_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3053 			     CFG_LCQ, rx_ring->cq_id);
3054 	if (err) {
3055 		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3056 		return err;
3057 	}
3058 	return err;
3059 }
3060 
3061 static int qlge_start_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
3062 {
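	/* Note: the cast below relies on the wqicb being laid out at the
	 * start of struct tx_ring.
	 */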
3063 	struct wqicb *wqicb = (struct wqicb *)tx_ring;
3064 	void __iomem *doorbell_area =
3065 		qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3066 	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3067 		(tx_ring->wq_id * sizeof(u64));
3068 	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3069 		(tx_ring->wq_id * sizeof(u64));
3070 	int err = 0;
3071 
3072 	/*
3073 	 * Assign doorbell registers for this tx_ring.
3074 	 */
3075 	/* TX PCI doorbell mem area for tx producer index */
3076 	tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
3077 	tx_ring->prod_idx = 0;
3078 	/* TX PCI doorbell mem area + 0x04 */
3079 	tx_ring->valid_db_reg = doorbell_area + 0x04;
3080 
3081 	/*
3082 	 * Assign shadow registers for this tx_ring.
3083 	 */
3084 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3085 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3086 
3087 	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3088 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3089 				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3090 	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3091 	wqicb->rid = 0;
3092 	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3093 
3094 	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3095 
3096 	qlge_init_tx_ring(qdev, tx_ring);
3097 
3098 	err = qlge_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3099 			     (u16)tx_ring->wq_id);
3100 	if (err) {
3101 		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3102 		return err;
3103 	}
3104 	return err;
3105 }
3106 
3107 static void qlge_disable_msix(struct qlge_adapter *qdev)
3108 {
3109 	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3110 		pci_disable_msix(qdev->pdev);
3111 		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3112 		kfree(qdev->msi_x_entry);
3113 		qdev->msi_x_entry = NULL;
3114 	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3115 		pci_disable_msi(qdev->pdev);
3116 		clear_bit(QL_MSI_ENABLED, &qdev->flags);
3117 	}
3118 }
3119 
3120 /* We start by trying to get the number of vectors
3121  * stored in qdev->intr_count. If we don't get that
3122  * many then we reduce the count and try again.
3123  */
3124 static void qlge_enable_msix(struct qlge_adapter *qdev)
3125 {
3126 	int i, err;
3127 
3128 	/* Get the MSIX vectors. */
3129 	if (qlge_irq_type == MSIX_IRQ) {
3130 		/* Try to alloc space for the msix struct,
3131 		 * if it fails then go to MSI/legacy.
3132 		 */
3133 		qdev->msi_x_entry = kcalloc(qdev->intr_count,
3134 					    sizeof(struct msix_entry),
3135 					    GFP_KERNEL);
3136 		if (!qdev->msi_x_entry) {
3137 			qlge_irq_type = MSI_IRQ;
3138 			goto msi;
3139 		}
3140 
3141 		for (i = 0; i < qdev->intr_count; i++)
3142 			qdev->msi_x_entry[i].entry = i;
3143 
3144 		err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3145 					    1, qdev->intr_count);
3146 		if (err < 0) {
3147 			kfree(qdev->msi_x_entry);
3148 			qdev->msi_x_entry = NULL;
3149 			netif_warn(qdev, ifup, qdev->ndev,
3150 				   "MSI-X Enable failed, trying MSI.\n");
3151 			qlge_irq_type = MSI_IRQ;
3152 		} else {
3153 			qdev->intr_count = err;
3154 			set_bit(QL_MSIX_ENABLED, &qdev->flags);
3155 			netif_info(qdev, ifup, qdev->ndev,
3156 				   "MSI-X Enabled, got %d vectors.\n",
3157 				   qdev->intr_count);
3158 			return;
3159 		}
3160 	}
3161 msi:
3162 	qdev->intr_count = 1;
3163 	if (qlge_irq_type == MSI_IRQ) {
3164 		if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
3165 			set_bit(QL_MSI_ENABLED, &qdev->flags);
3166 			netif_info(qdev, ifup, qdev->ndev,
3167 				   "Running with MSI interrupts.\n");
3168 			return;
3169 		}
3170 	}
3171 	qlge_irq_type = LEG_IRQ;
3172 	set_bit(QL_LEGACY_ENABLED, &qdev->flags);
3173 	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3174 		     "Running with legacy interrupts.\n");
3175 }
3176 
3177 /* Each vector services 1 RSS ring and 1 or more
3178  * TX completion rings.  This function loops through
3179  * the TX completion rings and assigns the vector that
3180  * will service it.  An example would be if there are
3181  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3182  * This would mean that vector 0 would service RSS ring 0
3183  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3184  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3185  */
3186 static void qlge_set_tx_vect(struct qlge_adapter *qdev)
3187 {
3188 	int i, j, vect;
3189 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3190 
3191 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3192 		/* Assign irq vectors to TX rx_rings.*/
3193 		for (vect = 0, j = 0, i = qdev->rss_ring_count;
3194 		     i < qdev->rx_ring_count; i++) {
3195 			if (j == tx_rings_per_vector) {
3196 				vect++;
3197 				j = 0;
3198 			}
3199 			qdev->rx_ring[i].irq = vect;
3200 			j++;
3201 		}
3202 	} else {
3203 		/* For a single vector all rings have an irq
3204 		 * of zero.
3205 		 */
3206 		for (i = 0; i < qdev->rx_ring_count; i++)
3207 			qdev->rx_ring[i].irq = 0;
3208 	}
3209 }
3210 
3211 /* Set the interrupt mask for this vector.  Each vector
3212  * will service 1 RSS ring and 1 or more TX completion
3213  * rings.  This function sets up a bit mask per vector
3214  * that indicates which rings it services.
3215  */
3216 static void qlge_set_irq_mask(struct qlge_adapter *qdev, struct intr_context *ctx)
3217 {
3218 	int j, vect = ctx->intr;
3219 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3220 
3221 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3222 		/* Add the RSS ring serviced by this vector
3223 		 * to the mask.
3224 		 */
3225 		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3226 		/* Add the TX ring(s) serviced by this vector
3227 		 * to the mask.
3228 		 */
3229 		for (j = 0; j < tx_rings_per_vector; j++) {
3230 			ctx->irq_mask |=
3231 				(1 << qdev->rx_ring[qdev->rss_ring_count +
3232 				 (vect * tx_rings_per_vector) + j].cq_id);
3233 		}
3234 	} else {
3235 		/* For a single vector we just shift each queue's
3236 		 * ID into the mask.
3237 		 */
3238 		for (j = 0; j < qdev->rx_ring_count; j++)
3239 			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3240 	}
3241 }
3242 
3243 /*
3244  * Here we build the intr_context structures based on
3245  * our rx_ring count and intr vector count.
3246  * The intr_context structure is used to hook each vector
3247  * to possibly different handlers.
3248  */
3249 static void qlge_resolve_queues_to_irqs(struct qlge_adapter *qdev)
3250 {
3251 	int i = 0;
3252 	struct intr_context *intr_context = &qdev->intr_context[0];
3253 
3254 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3255 		/* Each rx_ring has its
3256 		 * own intr_context since we have separate
3257 		 * vectors for each queue.
3258 		 */
3259 		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3260 			qdev->rx_ring[i].irq = i;
3261 			intr_context->intr = i;
3262 			intr_context->qdev = qdev;
3263 			/* Set up this vector's bit-mask that indicates
3264 			 * which queues it services.
3265 			 */
3266 			qlge_set_irq_mask(qdev, intr_context);
3267 			/*
3268 			 * We set up each vector's enable/disable/read bits so
3269 			 * there's no bit/mask calculations in the critical path.
3270 			 */
3271 			intr_context->intr_en_mask =
3272 				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3273 				INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3274 				| i;
3275 			intr_context->intr_dis_mask =
3276 				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3277 				INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3278 				INTR_EN_IHD | i;
3279 			intr_context->intr_read_mask =
3280 				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3281 				INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3282 				i;
3283 			if (i == 0) {
3284 				/* The first vector/queue handles
3285 				 * broadcast/multicast, fatal errors,
3286 				 * and firmware events.  This in addition
3287 				 * to normal inbound NAPI processing.
3288 				 */
3289 				intr_context->handler = qlge_isr;
3290 				sprintf(intr_context->name, "%s-rx-%d",
3291 					qdev->ndev->name, i);
3292 			} else {
3293 				/*
3294 				 * Inbound queues handle unicast frames only.
3295 				 */
3296 				intr_context->handler = qlge_msix_rx_isr;
3297 				sprintf(intr_context->name, "%s-rx-%d",
3298 					qdev->ndev->name, i);
3299 			}
3300 		}
3301 	} else {
3302 		/*
3303 		 * All rx_rings use the same intr_context since
3304 		 * there is only one vector.
3305 		 */
3306 		intr_context->intr = 0;
3307 		intr_context->qdev = qdev;
3308 		/*
3309 		 * We set up each vector's enable/disable/read bits so
3310 		 * there's no bit/mask calculations in the critical path.
3311 		 */
3312 		intr_context->intr_en_mask =
3313 			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3314 		intr_context->intr_dis_mask =
3315 			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3316 			INTR_EN_TYPE_DISABLE;
3317 		if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
3318 			/* Experience shows that when using INTx interrupts,
3319 			 * the device does not always auto-mask INTR_EN_EN.
3320 			 * Moreover, masking INTR_EN_EN manually does not
3321 			 * immediately prevent interrupt generation.
3322 			 */
3323 			intr_context->intr_en_mask |= INTR_EN_EI << 16 |
3324 				INTR_EN_EI;
3325 			intr_context->intr_dis_mask |= INTR_EN_EI << 16;
3326 		}
3327 		intr_context->intr_read_mask =
3328 			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3329 		/*
3330 		 * Single interrupt means one handler for all rings.
3331 		 */
3332 		intr_context->handler = qlge_isr;
3333 		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3334 		/* Set up this vector's bit-mask that indicates
3335 		 * which queues it services. In this case there is
3336 		 * a single vector so it will service all RSS and
3337 		 * TX completion rings.
3338 		 */
3339 		qlge_set_irq_mask(qdev, intr_context);
3340 	}
3341 	/* Tell the TX completion rings which MSIx vector
3342 	 * they will be using.
3343 	 */
3344 	qlge_set_tx_vect(qdev);
3345 }
3346 
3347 static void qlge_free_irq(struct qlge_adapter *qdev)
3348 {
3349 	int i;
3350 	struct intr_context *intr_context = &qdev->intr_context[0];
3351 
3352 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3353 		if (intr_context->hooked) {
3354 			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3355 				free_irq(qdev->msi_x_entry[i].vector,
3356 					 &qdev->rx_ring[i]);
3357 			} else {
3358 				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3359 			}
3360 		}
3361 	}
3362 	qlge_disable_msix(qdev);
3363 }
3364 
3365 static int qlge_request_irq(struct qlge_adapter *qdev)
3366 {
3367 	int i;
3368 	int status = 0;
3369 	struct pci_dev *pdev = qdev->pdev;
3370 	struct intr_context *intr_context = &qdev->intr_context[0];
3371 
3372 	qlge_resolve_queues_to_irqs(qdev);
3373 
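	/* Hook a handler for each vector.  With MSI-X every rx_ring gets
	 * its own vector and handler; with MSI or INTx a single handler
	 * attached to rx_ring[0] services everything.
	 */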
3374 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3375 		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3376 			status = request_irq(qdev->msi_x_entry[i].vector,
3377 					     intr_context->handler,
3378 					     0,
3379 					     intr_context->name,
3380 					     &qdev->rx_ring[i]);
3381 			if (status) {
3382 				netif_err(qdev, ifup, qdev->ndev,
3383 					  "Failed request for MSIX interrupt %d.\n",
3384 					  i);
3385 				goto err_irq;
3386 			}
3387 		} else {
3388 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3389 				     "trying msi or legacy interrupts.\n");
3390 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3391 				     "%s: irq = %d.\n", __func__, pdev->irq);
3392 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3393 				     "%s: context->name = %s.\n", __func__,
3394 				     intr_context->name);
3395 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3396 				     "%s: dev_id = 0x%p.\n", __func__,
3397 				     &qdev->rx_ring[0]);
3398 			status =
3399 				request_irq(pdev->irq, qlge_isr,
3400 					    test_bit(QL_MSI_ENABLED, &qdev->flags)
3401 					    ? 0
3402 					    : IRQF_SHARED,
3403 					    intr_context->name, &qdev->rx_ring[0]);
3404 			if (status)
3405 				goto err_irq;
3406 
3407 			netif_err(qdev, ifup, qdev->ndev,
3408 				  "Hooked intr 0, queue type RX_Q, with name %s.\n",
3409 				  intr_context->name);
3410 		}
3411 		intr_context->hooked = 1;
3412 	}
3413 	return status;
3414 err_irq:
3415 	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3416 	qlge_free_irq(qdev);
3417 	return status;
3418 }
3419 
3420 static int qlge_start_rss(struct qlge_adapter *qdev)
3421 {
3422 	static const u8 init_hash_seed[] = {
3423 		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3424 		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3425 		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3426 		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3427 		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3428 	};
3429 	struct ricb *ricb = &qdev->ricb;
3430 	int status = 0;
3431 	int i;
3432 	u8 *hash_id = (u8 *)ricb->hash_cq_id;
3433 
3434 	memset((void *)ricb, 0, sizeof(*ricb));
3435 
3436 	ricb->base_cq = RSS_L4K;
3437 	ricb->flags =
3438 		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3439 	ricb->mask = cpu_to_le16((u16)(0x3ff));
3440 
3441 	/*
3442 	 * Fill out the Indirection Table.
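	 * Each of the 1024 entries maps a hash bucket to an RSS ring in
	 * round-robin order; the "& (rss_ring_count - 1)" masking assumes
	 * rss_ring_count is a power of two.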
3443 	 */
3444 	for (i = 0; i < 1024; i++)
3445 		hash_id[i] = (i & (qdev->rss_ring_count - 1));
3446 
3447 	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3448 	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3449 
3450 	status = qlge_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3451 	if (status) {
3452 		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3453 		return status;
3454 	}
3455 	return status;
3456 }
3457 
3458 static int qlge_clear_routing_entries(struct qlge_adapter *qdev)
3459 {
3460 	int i, status = 0;
3461 
3462 	status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3463 	if (status)
3464 		return status;
3465 	/* Clear all the entries in the routing table. */
3466 	for (i = 0; i < 16; i++) {
3467 		status = qlge_set_routing_reg(qdev, i, 0, 0);
3468 		if (status) {
3469 			netif_err(qdev, ifup, qdev->ndev,
3470 				  "Failed to init routing register for CAM packets.\n");
3471 			break;
3472 		}
3473 	}
3474 	qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
3475 	return status;
3476 }
3477 
3478 /* Initialize the frame-to-queue routing. */
3479 static int qlge_route_initialize(struct qlge_adapter *qdev)
3480 {
3481 	int status = 0;
3482 
3483 	/* Clear all the entries in the routing table. */
3484 	status = qlge_clear_routing_entries(qdev);
3485 	if (status)
3486 		return status;
3487 
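	/* Program the routing slots for IP/TCP/UDP checksum-error frames,
	 * broadcast frames, RSS matches (when more than one inbound queue
	 * exists), and CAM hits.  The routing-index semaphore must be held
	 * while these registers are written.
	 */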
3488 	status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3489 	if (status)
3490 		return status;
3491 
3492 	status = qlge_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3493 				      RT_IDX_IP_CSUM_ERR, 1);
3494 	if (status) {
3495 		netif_err(qdev, ifup, qdev->ndev,
3496 			  "Failed to init routing register for IP CSUM error packets.\n");
3497 		goto exit;
3498 	}
3499 	status = qlge_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3500 				      RT_IDX_TU_CSUM_ERR, 1);
3501 	if (status) {
3502 		netif_err(qdev, ifup, qdev->ndev,
3503 			  "Failed to init routing register for TCP/UDP CSUM error packets.\n");
3504 		goto exit;
3505 	}
3506 	status = qlge_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3507 	if (status) {
3508 		netif_err(qdev, ifup, qdev->ndev,
3509 			  "Failed to init routing register for broadcast packets.\n");
3510 		goto exit;
3511 	}
3512 	/* If we have more than one inbound queue, then turn on RSS in the
3513 	 * routing block.
3514 	 */
3515 	if (qdev->rss_ring_count > 1) {
3516 		status = qlge_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3517 					      RT_IDX_RSS_MATCH, 1);
3518 		if (status) {
3519 			netif_err(qdev, ifup, qdev->ndev,
3520 				  "Failed to init routing register for MATCH RSS packets.\n");
3521 			goto exit;
3522 		}
3523 	}
3524 
3525 	status = qlge_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3526 				      RT_IDX_CAM_HIT, 1);
3527 	if (status)
3528 		netif_err(qdev, ifup, qdev->ndev,
3529 			  "Failed to init routing register for CAM packets.\n");
3530 exit:
3531 	qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
3532 	return status;
3533 }
3534 
3535 int qlge_cam_route_initialize(struct qlge_adapter *qdev)
3536 {
3537 	int status, set;
3538 
3539 	/* Check if the link is up and use that to
3540 	 * determine whether we are setting or clearing
3541 	 * the MAC address in the CAM.
3542 	 */
3543 	set = qlge_read32(qdev, STS);
3544 	set &= qdev->port_link_up;
3545 	status = qlge_set_mac_addr(qdev, set);
3546 	if (status) {
3547 		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3548 		return status;
3549 	}
3550 
3551 	status = qlge_route_initialize(qdev);
3552 	if (status)
3553 		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3554 
3555 	return status;
3556 }
3557 
3558 static int qlge_adapter_initialize(struct qlge_adapter *qdev)
3559 {
3560 	u32 value, mask;
3561 	int i;
3562 	int status = 0;
3563 
3564 	/*
3565 	 * Set up the System register to halt on errors.
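	 * Writing (value << 16) | value uses the register's upper 16 bits
	 * as a write-enable mask, so only the selected bits are modified.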
3566 	 */
3567 	value = SYS_EFE | SYS_FAE;
3568 	mask = value << 16;
3569 	qlge_write32(qdev, SYS, mask | value);
3570 
3571 	/* Set the default queue, and VLAN behavior. */
3572 	value = NIC_RCV_CFG_DFQ;
3573 	mask = NIC_RCV_CFG_DFQ_MASK;
3574 	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3575 		value |= NIC_RCV_CFG_RV;
3576 		mask |= (NIC_RCV_CFG_RV << 16);
3577 	}
3578 	qlge_write32(qdev, NIC_RCV_CFG, (mask | value));
3579 
3580 	/* Set the MPI interrupt to enabled. */
3581 	qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3582 
3583 	/* Enable the function, set pagesize, enable error checking. */
3584 	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3585 		FSC_EC | FSC_VM_PAGE_4K;
3586 	value |= SPLT_SETTING;
3587 
3588 	/* Set/clear header splitting. */
3589 	mask = FSC_VM_PAGESIZE_MASK |
3590 		FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3591 	qlge_write32(qdev, FSC, mask | value);
3592 
3593 	qlge_write32(qdev, SPLT_HDR, SPLT_LEN);
3594 
3595 	/* Set RX packet routing to use the port/PCI function on which the
3596 	 * packet arrived, in addition to the usual frame routing.
3597 	 * This is helpful on bonding where both interfaces can have
3598 	 * the same MAC address.
3599 	 */
3600 	qlge_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3601 	/* Reroute all packets to our Interface.
3602 	 * They may have been routed to MPI firmware
3603 	 * due to WOL.
3604 	 */
3605 	value = qlge_read32(qdev, MGMT_RCV_CFG);
3606 	value &= ~MGMT_RCV_CFG_RM;
3607 	mask = 0xffff0000;
3608 
3609 	/* Sticky reg needs clearing due to WOL. */
3610 	qlge_write32(qdev, MGMT_RCV_CFG, mask);
3611 	qlge_write32(qdev, MGMT_RCV_CFG, mask | value);
3612 
3613 	/* Default WOL is enabled on Mezz cards */
3614 	if (qdev->pdev->subsystem_device == 0x0068 ||
3615 	    qdev->pdev->subsystem_device == 0x0180)
3616 		qdev->wol = WAKE_MAGIC;
3617 
3618 	/* Start up the rx queues. */
3619 	for (i = 0; i < qdev->rx_ring_count; i++) {
3620 		status = qlge_start_rx_ring(qdev, &qdev->rx_ring[i]);
3621 		if (status) {
3622 			netif_err(qdev, ifup, qdev->ndev,
3623 				  "Failed to start rx ring[%d].\n", i);
3624 			return status;
3625 		}
3626 	}
3627 
3628 	/* If there is more than one inbound completion queue
3629 	 * then download a RICB to configure RSS.
3630 	 */
3631 	if (qdev->rss_ring_count > 1) {
3632 		status = qlge_start_rss(qdev);
3633 		if (status) {
3634 			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3635 			return status;
3636 		}
3637 	}
3638 
3639 	/* Start up the tx queues. */
3640 	for (i = 0; i < qdev->tx_ring_count; i++) {
3641 		status = qlge_start_tx_ring(qdev, &qdev->tx_ring[i]);
3642 		if (status) {
3643 			netif_err(qdev, ifup, qdev->ndev,
3644 				  "Failed to start tx ring[%d].\n", i);
3645 			return status;
3646 		}
3647 	}
3648 
3649 	/* Initialize the port and set the max framesize. */
3650 	status = qdev->nic_ops->port_initialize(qdev);
3651 	if (status)
3652 		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3653 
3654 	/* Set up the MAC address and frame routing filter. */
3655 	status = qlge_cam_route_initialize(qdev);
3656 	if (status) {
3657 		netif_err(qdev, ifup, qdev->ndev,
3658 			  "Failed to init CAM/Routing tables.\n");
3659 		return status;
3660 	}
3661 
3662 	/* Start NAPI for the RSS queues. */
3663 	for (i = 0; i < qdev->rss_ring_count; i++)
3664 		napi_enable(&qdev->rx_ring[i].napi);
3665 
3666 	return status;
3667 }
3668 
3669 /* Issue soft reset to chip. */
3670 static int qlge_adapter_reset(struct qlge_adapter *qdev)
3671 {
3672 	u32 value;
3673 	int status = 0;
3674 	unsigned long end_jiffies;
3675 
3676 	/* Clear all the entries in the routing table. */
3677 	status = qlge_clear_routing_entries(qdev);
3678 	if (status) {
3679 		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3680 		return status;
3681 	}
3682 
3683 	/* If the ASIC recovery bit is set, skip the mailbox commands and
3684 	 * clear the bit; otherwise we are in the normal reset process.
3685 	 */
3686 	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3687 		/* Stop management traffic. */
3688 		qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3689 
3690 		/* Wait for the NIC and MGMNT FIFOs to empty. */
3691 		qlge_wait_fifo_empty(qdev);
3692 	} else {
3693 		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3694 	}
3695 
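	/* Issue the function reset and poll for RST_FO_FR to self-clear
	 * before the timeout below expires.
	 */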
3696 	qlge_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3697 
3698 	end_jiffies = jiffies + usecs_to_jiffies(30);
3699 	do {
3700 		value = qlge_read32(qdev, RST_FO);
3701 		if ((value & RST_FO_FR) == 0)
3702 			break;
3703 		cpu_relax();
3704 	} while (time_before(jiffies, end_jiffies));
3705 
3706 	if (value & RST_FO_FR) {
3707 		netif_err(qdev, ifdown, qdev->ndev,
3708 			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
3709 		status = -ETIMEDOUT;
3710 	}
3711 
3712 	/* Resume management traffic. */
3713 	qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3714 	return status;
3715 }
3716 
3717 static void qlge_display_dev_info(struct net_device *ndev)
3718 {
3719 	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3720 
3721 	netif_info(qdev, probe, qdev->ndev,
3722 		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, XG Roll = %d, XG Rev = %d.\n",
3723 		   qdev->func,
3724 		   qdev->port,
3725 		   qdev->chip_rev_id & 0x0000000f,
3726 		   qdev->chip_rev_id >> 4 & 0x0000000f,
3727 		   qdev->chip_rev_id >> 8 & 0x0000000f,
3728 		   qdev->chip_rev_id >> 12 & 0x0000000f);
3729 	netif_info(qdev, probe, qdev->ndev,
3730 		   "MAC address %pM\n", ndev->dev_addr);
3731 }
3732 
3733 static int qlge_wol(struct qlge_adapter *qdev)
3734 {
3735 	int status = 0;
3736 	u32 wol = MB_WOL_DISABLE;
3737 
3738 	/* The CAM is still intact after a reset, but if we
3739 	 * are doing WOL, then we may need to program the
3740 	 * routing regs. We would also need to issue the mailbox
3741 	 * commands to instruct the MPI what to do per the ethtool
3742 	 * settings.
3743 	 */
3744 
3745 	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3746 			 WAKE_MCAST | WAKE_BCAST)) {
3747 		netif_err(qdev, ifdown, qdev->ndev,
3748 			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3749 			  qdev->wol);
3750 		return -EINVAL;
3751 	}
3752 
3753 	if (qdev->wol & WAKE_MAGIC) {
3754 		status = qlge_mb_wol_set_magic(qdev, 1);
3755 		if (status) {
3756 			netif_err(qdev, ifdown, qdev->ndev,
3757 				  "Failed to set magic packet on %s.\n",
3758 				  qdev->ndev->name);
3759 			return status;
3760 		}
3761 		netif_info(qdev, drv, qdev->ndev,
3762 			   "Enabled magic packet successfully on %s.\n",
3763 			   qdev->ndev->name);
3764 
3765 		wol |= MB_WOL_MAGIC_PKT;
3766 	}
3767 
3768 	if (qdev->wol) {
3769 		wol |= MB_WOL_MODE_ON;
3770 		status = qlge_mb_wol_mode(qdev, wol);
3771 		netif_err(qdev, drv, qdev->ndev,
3772 			  "WOL %s (wol code 0x%x) on %s\n",
3773 			  (status == 0) ? "Successfully set" : "Failed",
3774 			  wol, qdev->ndev->name);
3775 	}
3776 
3777 	return status;
3778 }
3779 
3780 static void qlge_cancel_all_work_sync(struct qlge_adapter *qdev)
3781 {
3782 	/* Don't kill the reset worker thread if we
3783 	 * are in the process of recovery.
3784 	 */
3785 	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3786 		cancel_delayed_work_sync(&qdev->asic_reset_work);
3787 	cancel_delayed_work_sync(&qdev->mpi_reset_work);
3788 	cancel_delayed_work_sync(&qdev->mpi_work);
3789 	cancel_delayed_work_sync(&qdev->mpi_idc_work);
3790 	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3791 }
3792 
3793 static int qlge_adapter_down(struct qlge_adapter *qdev)
3794 {
3795 	int i, status = 0;
3796 
3797 	qlge_link_off(qdev);
3798 
3799 	qlge_cancel_all_work_sync(qdev);
3800 
3801 	for (i = 0; i < qdev->rss_ring_count; i++)
3802 		napi_disable(&qdev->rx_ring[i].napi);
3803 
3804 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
3805 
3806 	qlge_disable_interrupts(qdev);
3807 
3808 	qlge_tx_ring_clean(qdev);
3809 
3810 	/* Call netif_napi_del() from a common point. */
3811 	for (i = 0; i < qdev->rss_ring_count; i++)
3812 		netif_napi_del(&qdev->rx_ring[i].napi);
3813 
3814 	status = qlge_adapter_reset(qdev);
3815 	if (status)
3816 		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3817 			  qdev->func);
3818 	qlge_free_rx_buffers(qdev);
3819 
3820 	return status;
3821 }
3822 
3823 static int qlge_adapter_up(struct qlge_adapter *qdev)
3824 {
3825 	int err = 0;
3826 
3827 	err = qlge_adapter_initialize(qdev);
3828 	if (err) {
3829 		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3830 		goto err_init;
3831 	}
3832 	set_bit(QL_ADAPTER_UP, &qdev->flags);
3833 	qlge_alloc_rx_buffers(qdev);
3834 	/* If the port is initialized and the
3835 	 * link is up, turn on the carrier.
3836 	 */
3837 	if ((qlge_read32(qdev, STS) & qdev->port_init) &&
3838 	    (qlge_read32(qdev, STS) & qdev->port_link_up))
3839 		qlge_link_on(qdev);
3840 	/* Restore rx mode. */
3841 	clear_bit(QL_ALLMULTI, &qdev->flags);
3842 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
3843 	qlge_set_multicast_list(qdev->ndev);
3844 
3845 	/* Restore vlan setting. */
3846 	qlge_restore_vlan(qdev);
3847 
3848 	qlge_enable_interrupts(qdev);
3849 	qlge_enable_all_completion_interrupts(qdev);
3850 	netif_tx_start_all_queues(qdev->ndev);
3851 
3852 	return 0;
3853 err_init:
3854 	qlge_adapter_reset(qdev);
3855 	return err;
3856 }
3857 
3858 static void qlge_release_adapter_resources(struct qlge_adapter *qdev)
3859 {
3860 	qlge_free_mem_resources(qdev);
3861 	qlge_free_irq(qdev);
3862 }
3863 
3864 static int qlge_get_adapter_resources(struct qlge_adapter *qdev)
3865 {
3866 	if (qlge_alloc_mem_resources(qdev)) {
3867 		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3868 		return -ENOMEM;
3869 	}
3870 	return qlge_request_irq(qdev);
3871 }
3872 
3873 static int qlge_close(struct net_device *ndev)
3874 {
3875 	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3876 	int i;
3877 
3878 	/* If we hit the pci_channel_io_perm_failure
3879 	 * condition, then we have already
3880 	 * brought the adapter down.
3881 	 */
3882 	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3883 		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3884 		clear_bit(QL_EEH_FATAL, &qdev->flags);
3885 		return 0;
3886 	}
3887 
3888 	/*
3889 	 * Wait for device to recover from a reset.
3890 	 * (Rarely happens, but possible.)
3891 	 */
3892 	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3893 		msleep(1);
3894 
3895 	/* Make sure refill_work doesn't re-enable napi */
3896 	for (i = 0; i < qdev->rss_ring_count; i++)
3897 		cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
3898 
3899 	qlge_adapter_down(qdev);
3900 	qlge_release_adapter_resources(qdev);
3901 	return 0;
3902 }
3903 
3904 static void qlge_set_lb_size(struct qlge_adapter *qdev)
3905 {
3906 	if (qdev->ndev->mtu <= 1500)
3907 		qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
3908 	else
3909 		qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
3910 	qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
3911 }
3912 
3913 static int qlge_configure_rings(struct qlge_adapter *qdev)
3914 {
3915 	int i;
3916 	struct rx_ring *rx_ring;
3917 	struct tx_ring *tx_ring;
3918 	int cpu_cnt = min_t(int, MAX_CPUS, num_online_cpus());
3919 
3920 	/* In a perfect world we have one RSS ring for each CPU
3921 	 * and each has its own vector.  To do that we ask for
3922 	 * cpu_cnt vectors.  qlge_enable_msix() will adjust the
3923 	 * vector count to what we actually get.  We then
3924 	 * allocate an RSS ring for each.
3925 	 * Essentially, we are doing min(cpu_count, msix_vector_count).
3926 	 */
3927 	qdev->intr_count = cpu_cnt;
3928 	qlge_enable_msix(qdev);
3929 	/* Adjust the RSS ring count to the actual vector count. */
3930 	qdev->rss_ring_count = qdev->intr_count;
3931 	qdev->tx_ring_count = cpu_cnt;
3932 	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
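	/* rx_ring[] layout: entries 0..rss_ring_count-1 are the inbound
	 * RSS completion rings; entries rss_ring_count..rx_ring_count-1
	 * are outbound completion rings, one per TX ring.
	 */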
3933 
3934 	for (i = 0; i < qdev->tx_ring_count; i++) {
3935 		tx_ring = &qdev->tx_ring[i];
3936 		memset((void *)tx_ring, 0, sizeof(*tx_ring));
3937 		tx_ring->qdev = qdev;
3938 		tx_ring->wq_id = i;
3939 		tx_ring->wq_len = qdev->tx_ring_size;
3940 		tx_ring->wq_size =
3941 			tx_ring->wq_len * sizeof(struct qlge_ob_mac_iocb_req);
3942 
3943 		/*
3944 		 * The completion queue IDs for the tx rings start
3945 		 * immediately after the rss rings.
3946 		 */
3947 		tx_ring->cq_id = qdev->rss_ring_count + i;
3948 	}
3949 
3950 	for (i = 0; i < qdev->rx_ring_count; i++) {
3951 		rx_ring = &qdev->rx_ring[i];
3952 		memset((void *)rx_ring, 0, sizeof(*rx_ring));
3953 		rx_ring->qdev = qdev;
3954 		rx_ring->cq_id = i;
3955 		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
3956 		if (i < qdev->rss_ring_count) {
3957 			/*
3958 			 * Inbound (RSS) queues.
3959 			 */
3960 			rx_ring->cq_len = qdev->rx_ring_size;
3961 			rx_ring->cq_size =
3962 				rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
3963 			rx_ring->lbq.type = QLGE_LB;
3964 			rx_ring->sbq.type = QLGE_SB;
3965 			INIT_DELAYED_WORK(&rx_ring->refill_work,
3966 					  &qlge_slow_refill);
3967 		} else {
3968 			/*
3969 			 * Outbound queue handles outbound completions only.
3970 			 */
3971 			/* outbound cq is same size as tx_ring it services. */
3972 			rx_ring->cq_len = qdev->tx_ring_size;
3973 			rx_ring->cq_size =
3974 				rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
3975 		}
3976 	}
3977 	return 0;
3978 }
3979 
3980 static int qlge_open(struct net_device *ndev)
3981 {
3982 	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3983 	int err = 0;
3984 
3985 	err = qlge_adapter_reset(qdev);
3986 	if (err)
3987 		return err;
3988 
3989 	qlge_set_lb_size(qdev);
3990 	err = qlge_configure_rings(qdev);
3991 	if (err)
3992 		return err;
3993 
3994 	err = qlge_get_adapter_resources(qdev);
3995 	if (err)
3996 		goto error_up;
3997 
3998 	err = qlge_adapter_up(qdev);
3999 	if (err)
4000 		goto error_up;
4001 
4002 	return err;
4003 
4004 error_up:
4005 	qlge_release_adapter_resources(qdev);
4006 	return err;
4007 }
4008 
4009 static int qlge_change_rx_buffers(struct qlge_adapter *qdev)
4010 {
4011 	int status;
4012 
4013 	/* Wait for an outstanding reset to complete. */
4014 	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4015 		int i = 4;
4016 
4017 		while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4018 			netif_err(qdev, ifup, qdev->ndev,
4019 				  "Waiting for adapter UP...\n");
4020 			ssleep(1);
4021 		}
4022 
4023 		if (!i) {
4024 			netif_err(qdev, ifup, qdev->ndev,
4025 				  "Timed out waiting for adapter UP\n");
4026 			return -ETIMEDOUT;
4027 		}
4028 	}
4029 
4030 	status = qlge_adapter_down(qdev);
4031 	if (status)
4032 		goto error;
4033 
4034 	qlge_set_lb_size(qdev);
4035 
4036 	status = qlge_adapter_up(qdev);
4037 	if (status)
4038 		goto error;
4039 
4040 	return status;
4041 error:
4042 	netif_alert(qdev, ifup, qdev->ndev,
4043 		    "Driver up/down cycle failed, closing device.\n");
4044 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4045 	dev_close(qdev->ndev);
4046 	return status;
4047 }
4048 
4049 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4050 {
4051 	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4052 	int status;
4053 
4054 	if (ndev->mtu == 1500 && new_mtu == 9000)
4055 		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4056 	else if (ndev->mtu == 9000 && new_mtu == 1500)
4057 		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4058 	else
4059 		return -EINVAL;
4060 
4061 	queue_delayed_work(qdev->workqueue,
4062 			   &qdev->mpi_port_cfg_work, 3 * HZ);
4063 
4064 	ndev->mtu = new_mtu;
4065 
4066 	if (!netif_running(qdev->ndev))
4067 		return 0;
4068 
4069 	status = qlge_change_rx_buffers(qdev);
4070 	if (status) {
4071 		netif_err(qdev, ifup, qdev->ndev,
4072 			  "Changing MTU failed.\n");
4073 	}
4074 
4075 	return status;
4076 }
4077 
4078 static struct net_device_stats *qlge_get_stats(struct net_device
4079 					       *ndev)
4080 {
4081 	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4082 	struct rx_ring *rx_ring = &qdev->rx_ring[0];
4083 	struct tx_ring *tx_ring = &qdev->tx_ring[0];
4084 	unsigned long pkts, mcast, dropped, errors, bytes;
4085 	int i;
4086 
4087 	/* Get RX stats. */
4088 	pkts = mcast = dropped = errors = bytes = 0;
4089 	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4090 		pkts += rx_ring->rx_packets;
4091 		bytes += rx_ring->rx_bytes;
4092 		dropped += rx_ring->rx_dropped;
4093 		errors += rx_ring->rx_errors;
4094 		mcast += rx_ring->rx_multicast;
4095 	}
4096 	ndev->stats.rx_packets = pkts;
4097 	ndev->stats.rx_bytes = bytes;
4098 	ndev->stats.rx_dropped = dropped;
4099 	ndev->stats.rx_errors = errors;
4100 	ndev->stats.multicast = mcast;
4101 
4102 	/* Get TX stats. */
4103 	pkts = errors = bytes = 0;
4104 	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4105 		pkts += tx_ring->tx_packets;
4106 		bytes += tx_ring->tx_bytes;
4107 		errors += tx_ring->tx_errors;
4108 	}
4109 	ndev->stats.tx_packets = pkts;
4110 	ndev->stats.tx_bytes = bytes;
4111 	ndev->stats.tx_errors = errors;
4112 	return &ndev->stats;
4113 }
4114 
4115 static void qlge_set_multicast_list(struct net_device *ndev)
4116 {
4117 	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4118 	struct netdev_hw_addr *ha;
4119 	int i, status;
4120 
4121 	status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4122 	if (status)
4123 		return;
4124 	/*
4125 	 * Set or clear promiscuous mode if a
4126 	 * transition is taking place.
4127 	 */
4128 	if (ndev->flags & IFF_PROMISC) {
4129 		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4130 			if (qlge_set_routing_reg
4131 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4132 				netif_err(qdev, hw, qdev->ndev,
4133 					  "Failed to set promiscuous mode.\n");
4134 			} else {
4135 				set_bit(QL_PROMISCUOUS, &qdev->flags);
4136 			}
4137 		}
4138 	} else {
4139 		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4140 			if (qlge_set_routing_reg
4141 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4142 				netif_err(qdev, hw, qdev->ndev,
4143 					  "Failed to clear promiscuous mode.\n");
4144 			} else {
4145 				clear_bit(QL_PROMISCUOUS, &qdev->flags);
4146 			}
4147 		}
4148 	}
4149 
4150 	/*
4151 	 * Set or clear all multicast mode if a
4152 	 * transition is taking place.
4153 	 */
4154 	if ((ndev->flags & IFF_ALLMULTI) ||
4155 	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4156 		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4157 			if (qlge_set_routing_reg
4158 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4159 				netif_err(qdev, hw, qdev->ndev,
4160 					  "Failed to set all-multi mode.\n");
4161 			} else {
4162 				set_bit(QL_ALLMULTI, &qdev->flags);
4163 			}
4164 		}
4165 	} else {
4166 		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4167 			if (qlge_set_routing_reg
4168 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4169 				netif_err(qdev, hw, qdev->ndev,
4170 					  "Failed to clear all-multi mode.\n");
4171 			} else {
4172 				clear_bit(QL_ALLMULTI, &qdev->flags);
4173 			}
4174 		}
4175 	}
4176 
4177 	if (!netdev_mc_empty(ndev)) {
4178 		status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4179 		if (status)
4180 			goto exit;
4181 		i = 0;
4182 		netdev_for_each_mc_addr(ha, ndev) {
4183 			if (qlge_set_mac_addr_reg(qdev, (u8 *)ha->addr,
4184 						  MAC_ADDR_TYPE_MULTI_MAC, i)) {
4185 				netif_err(qdev, hw, qdev->ndev,
4186 					  "Failed to load multicast address.\n");
4187 				qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4188 				goto exit;
4189 			}
4190 			i++;
4191 		}
4192 		qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4193 		if (qlge_set_routing_reg
4194 		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4195 			netif_err(qdev, hw, qdev->ndev,
4196 				  "Failed to set multicast match mode.\n");
4197 		} else {
4198 			set_bit(QL_ALLMULTI, &qdev->flags);
4199 		}
4200 	}
4201 exit:
4202 	qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
4203 }
4204 
4205 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4206 {
4207 	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4208 	struct sockaddr *addr = p;
4209 	int status;
4210 
4211 	if (!is_valid_ether_addr(addr->sa_data))
4212 		return -EADDRNOTAVAIL;
4213 	eth_hw_addr_set(ndev, addr->sa_data);
4214 	/* Update local copy of current mac address. */
4215 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4216 
4217 	status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4218 	if (status)
4219 		return status;
4220 	status = qlge_set_mac_addr_reg(qdev, (const u8 *)ndev->dev_addr,
4221 				       MAC_ADDR_TYPE_CAM_MAC,
4222 				       qdev->func * MAX_CQ);
4223 	if (status)
4224 		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4225 	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4226 	return status;
4227 }
4228 
4229 static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4230 {
4231 	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4232 
4233 	qlge_queue_asic_error(qdev);
4234 }
4235 
4236 static void qlge_asic_reset_work(struct work_struct *work)
4237 {
4238 	struct qlge_adapter *qdev =
4239 		container_of(work, struct qlge_adapter, asic_reset_work.work);
4240 	int status;
4241 
4242 	rtnl_lock();
4243 	status = qlge_adapter_down(qdev);
4244 	if (status)
4245 		goto error;
4246 
4247 	status = qlge_adapter_up(qdev);
4248 	if (status)
4249 		goto error;
4250 
4251 	/* Restore rx mode. */
4252 	clear_bit(QL_ALLMULTI, &qdev->flags);
4253 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
4254 	qlge_set_multicast_list(qdev->ndev);
4255 
4256 	rtnl_unlock();
4257 	return;
4258 error:
4259 	netif_alert(qdev, ifup, qdev->ndev,
4260 		    "Driver up/down cycle failed, closing device\n");
4261 
4262 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4263 	dev_close(qdev->ndev);
4264 	rtnl_unlock();
4265 }
4266 
4267 static const struct nic_operations qla8012_nic_ops = {
4268 	.get_flash		= qlge_get_8012_flash_params,
4269 	.port_initialize	= qlge_8012_port_initialize,
4270 };
4271 
4272 static const struct nic_operations qla8000_nic_ops = {
4273 	.get_flash		= qlge_get_8000_flash_params,
4274 	.port_initialize	= qlge_8000_port_initialize,
4275 };
4276 
4277 /* Find the pcie function number for the other NIC
4278  * on this chip.  Since both NIC functions share a
4279  * common firmware we have the lowest enabled function
4280  * do any common work.  Examples would be resetting
4281  * after a fatal firmware error, or doing a firmware
4282  * coredump.
4283  */
4284 static int qlge_get_alt_pcie_func(struct qlge_adapter *qdev)
4285 {
4286 	int status = 0;
4287 	u32 temp;
4288 	u32 nic_func1, nic_func2;
4289 
4290 	status = qlge_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4291 				   &temp);
4292 	if (status)
4293 		return status;
4294 
4295 	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4296 		     MPI_TEST_NIC_FUNC_MASK);
4297 	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4298 		     MPI_TEST_NIC_FUNC_MASK);
4299 
4300 	if (qdev->func == nic_func1)
4301 		qdev->alt_func = nic_func2;
4302 	else if (qdev->func == nic_func2)
4303 		qdev->alt_func = nic_func1;
4304 	else
4305 		status = -EIO;
4306 
4307 	return status;
4308 }
4309 
4310 static int qlge_get_board_info(struct qlge_adapter *qdev)
4311 {
4312 	int status;
4313 
4314 	qdev->func =
4315 		(qlge_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4316 	if (qdev->func > 3)
4317 		return -EIO;
4318 
4319 	status = qlge_get_alt_pcie_func(qdev);
4320 	if (status)
4321 		return status;
4322 
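	/* The NIC function with the lower PCIe function number is port 0;
	 * the other function is port 1.
	 */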
4323 	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4324 	if (qdev->port) {
4325 		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4326 		qdev->port_link_up = STS_PL1;
4327 		qdev->port_init = STS_PI1;
4328 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4329 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4330 	} else {
4331 		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4332 		qdev->port_link_up = STS_PL0;
4333 		qdev->port_init = STS_PI0;
4334 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4335 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4336 	}
4337 	qdev->chip_rev_id = qlge_read32(qdev, REV_ID);
4338 	qdev->device_id = qdev->pdev->device;
4339 	if (qdev->device_id == QLGE_DEVICE_ID_8012)
4340 		qdev->nic_ops = &qla8012_nic_ops;
4341 	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4342 		qdev->nic_ops = &qla8000_nic_ops;
4343 	return status;
4344 }
4345 
4346 static void qlge_release_all(struct pci_dev *pdev)
4347 {
4348 	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4349 
4350 	if (qdev->workqueue) {
4351 		destroy_workqueue(qdev->workqueue);
4352 		qdev->workqueue = NULL;
4353 	}
4354 
4355 	if (qdev->reg_base)
4356 		iounmap(qdev->reg_base);
4357 	if (qdev->doorbell_area)
4358 		iounmap(qdev->doorbell_area);
4359 	vfree(qdev->mpi_coredump);
4360 	pci_release_regions(pdev);
4361 }
4362 
4363 static int qlge_init_device(struct pci_dev *pdev, struct qlge_adapter *qdev,
4364 			    int cards_found)
4365 {
4366 	struct net_device *ndev = qdev->ndev;
4367 	int err = 0;
4368 
4369 	err = pci_enable_device(pdev);
4370 	if (err) {
4371 		dev_err(&pdev->dev, "PCI device enable failed.\n");
4372 		return err;
4373 	}
4374 
4375 	qdev->pdev = pdev;
4376 	pci_set_drvdata(pdev, qdev);
4377 
4378 	/* Set PCIe read request size */
4379 	err = pcie_set_readrq(pdev, 4096);
4380 	if (err) {
4381 		dev_err(&pdev->dev, "Set readrq failed.\n");
4382 		goto err_disable_pci;
4383 	}
4384 
4385 	err = pci_request_regions(pdev, DRV_NAME);
4386 	if (err) {
4387 		dev_err(&pdev->dev, "PCI region request failed.\n");
4388 		goto err_disable_pci;
4389 	}
4390 
4391 	pci_set_master(pdev);
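	/* Prefer 64-bit DMA; fall back to a 32-bit mask if that fails. */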
4392 	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4393 		set_bit(QL_DMA64, &qdev->flags);
4394 		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4395 	} else {
4396 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4397 		if (!err)
4398 			err = dma_set_coherent_mask(&pdev->dev,
4399 						    DMA_BIT_MASK(32));
4400 	}
4401 
4402 	if (err) {
4403 		dev_err(&pdev->dev, "No usable DMA configuration.\n");
4404 		goto err_release_pci;
4405 	}
4406 
4407 	/* Set PCIe reset type for EEH to fundamental. */
4408 	pdev->needs_freset = 1;
4409 	pci_save_state(pdev);
4410 	qdev->reg_base =
4411 		ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
4412 	if (!qdev->reg_base) {
4413 		dev_err(&pdev->dev, "Register mapping failed.\n");
4414 		err = -ENOMEM;
4415 		goto err_release_pci;
4416 	}
4417 
4418 	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4419 	qdev->doorbell_area =
4420 		ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
4421 	if (!qdev->doorbell_area) {
4422 		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4423 		err = -ENOMEM;
4424 		goto err_iounmap_base;
4425 	}
4426 
4427 	err = qlge_get_board_info(qdev);
4428 	if (err) {
4429 		dev_err(&pdev->dev, "Register access failed.\n");
4430 		err = -EIO;
4431 		goto err_iounmap_doorbell;
4432 	}
4433 	qdev->msg_enable = netif_msg_init(debug, default_msg);
4434 	spin_lock_init(&qdev->stats_lock);
4435 
4436 	if (qlge_mpi_coredump) {
4437 		qdev->mpi_coredump =
4438 			vmalloc(sizeof(struct qlge_mpi_coredump));
4439 		if (!qdev->mpi_coredump) {
4440 			err = -ENOMEM;
4441 			goto err_iounmap_doorbell;
4442 		}
4443 		if (qlge_force_coredump)
4444 			set_bit(QL_FRC_COREDUMP, &qdev->flags);
4445 	}
4446 	/* make sure the EEPROM is good */
4447 	err = qdev->nic_ops->get_flash(qdev);
4448 	if (err) {
4449 		dev_err(&pdev->dev, "Invalid FLASH.\n");
4450 		goto err_free_mpi_coredump;
4451 	}
4452 
4453 	/* Keep local copy of current mac address. */
4454 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4455 
4456 	/* Set up the default ring sizes. */
4457 	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4458 	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4459 
4460 	/* Set up the coalescing parameters. */
4461 	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4462 	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4463 	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4464 	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4465 
4466 	/*
4467 	 * Set up the operating parameters.
4468 	 */
4469 	qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4470 						  ndev->name);
4471 	if (!qdev->workqueue) {
4472 		err = -ENOMEM;
4473 		goto err_free_mpi_coredump;
4474 	}
4475 
4476 	INIT_DELAYED_WORK(&qdev->asic_reset_work, qlge_asic_reset_work);
4477 	INIT_DELAYED_WORK(&qdev->mpi_reset_work, qlge_mpi_reset_work);
4478 	INIT_DELAYED_WORK(&qdev->mpi_work, qlge_mpi_work);
4479 	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, qlge_mpi_port_cfg_work);
4480 	INIT_DELAYED_WORK(&qdev->mpi_idc_work, qlge_mpi_idc_work);
4481 	init_completion(&qdev->ide_completion);
4482 	mutex_init(&qdev->mpi_mutex);
4483 
4484 	if (!cards_found) {
4485 		dev_info(&pdev->dev, "%s\n", DRV_STRING);
4486 		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4487 			 DRV_NAME, DRV_VERSION);
4488 	}
4489 	return 0;
4490 
4491 err_free_mpi_coredump:
4492 	vfree(qdev->mpi_coredump);
4493 err_iounmap_doorbell:
4494 	iounmap(qdev->doorbell_area);
4495 err_iounmap_base:
4496 	iounmap(qdev->reg_base);
4497 err_release_pci:
4498 	pci_release_regions(pdev);
4499 err_disable_pci:
4500 	pci_disable_device(pdev);
4501 
4502 	return err;
4503 }
4504 
4505 static const struct net_device_ops qlge_netdev_ops = {
4506 	.ndo_open		= qlge_open,
4507 	.ndo_stop		= qlge_close,
4508 	.ndo_start_xmit		= qlge_send,
4509 	.ndo_change_mtu		= qlge_change_mtu,
4510 	.ndo_get_stats		= qlge_get_stats,
4511 	.ndo_set_rx_mode	= qlge_set_multicast_list,
4512 	.ndo_set_mac_address	= qlge_set_mac_address,
4513 	.ndo_validate_addr	= eth_validate_addr,
4514 	.ndo_tx_timeout		= qlge_tx_timeout,
4515 	.ndo_set_features	= qlge_set_features,
4516 	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
4517 	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
4518 };
4519 
4520 static void qlge_timer(struct timer_list *t)
4521 {
4522 	struct qlge_adapter *qdev = from_timer(qdev, t, timer);
4523 	u32 var = 0;
4524 
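	/* Periodically touch a device register so a dead PCI bus gets
	 * noticed (and EEH kicks in); stop rescheduling once the channel
	 * is reported offline.
	 */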
4525 	var = qlge_read32(qdev, STS);
4526 	if (pci_channel_offline(qdev->pdev)) {
4527 		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4528 		return;
4529 	}
4530 
4531 	mod_timer(&qdev->timer, jiffies + (5 * HZ));
4532 }
4533 
4534 static const struct devlink_ops qlge_devlink_ops;
4535 
4536 static int qlge_probe(struct pci_dev *pdev,
4537 		      const struct pci_device_id *pci_entry)
4538 {
4539 	struct qlge_netdev_priv *ndev_priv;
4540 	struct qlge_adapter *qdev = NULL;
4541 	struct net_device *ndev = NULL;
4542 	struct devlink *devlink;
4543 	static int cards_found;
4544 	int err;
4545 
4546 	devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter),
4547 				&pdev->dev);
4548 	if (!devlink)
4549 		return -ENOMEM;
4550 
4551 	qdev = devlink_priv(devlink);
4552 
4553 	ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
4554 				 min(MAX_CPUS,
4555 				     netif_get_num_default_rss_queues()));
4556 	if (!ndev) {
4557 		err = -ENOMEM;
4558 		goto devlink_free;
4559 	}
4560 
4561 	ndev_priv = netdev_priv(ndev);
4562 	ndev_priv->qdev = qdev;
4563 	ndev_priv->ndev = ndev;
4564 	qdev->ndev = ndev;
4565 	err = qlge_init_device(pdev, qdev, cards_found);
4566 	if (err < 0)
4567 		goto netdev_free;
4568 
4569 	SET_NETDEV_DEV(ndev, &pdev->dev);
4570 	ndev->hw_features = NETIF_F_SG |
4571 		NETIF_F_IP_CSUM |
4572 		NETIF_F_TSO |
4573 		NETIF_F_TSO_ECN |
4574 		NETIF_F_HW_VLAN_CTAG_TX |
4575 		NETIF_F_HW_VLAN_CTAG_RX |
4576 		NETIF_F_HW_VLAN_CTAG_FILTER |
4577 		NETIF_F_RXCSUM;
4578 	ndev->features = ndev->hw_features;
4579 	ndev->vlan_features = ndev->hw_features;
4580 	/* vlan gets same features (except vlan filter) */
4581 	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4582 				 NETIF_F_HW_VLAN_CTAG_TX |
4583 				 NETIF_F_HW_VLAN_CTAG_RX);
4584 
4585 	if (test_bit(QL_DMA64, &qdev->flags))
4586 		ndev->features |= NETIF_F_HIGHDMA;
4587 
4588 	/*
4589 	 * Set up net_device structure.
4590 	 */
4591 	ndev->tx_queue_len = qdev->tx_ring_size;
4592 	ndev->irq = pdev->irq;
4593 
4594 	ndev->netdev_ops = &qlge_netdev_ops;
4595 	ndev->ethtool_ops = &qlge_ethtool_ops;
4596 	ndev->watchdog_timeo = 10 * HZ;
4597 
4598 	/* MTU range: this driver only supports 1500 or 9000, so this only
4599 	 * filters out values above or below, and we'll rely on
4600 	 * qlge_change_mtu to make sure only 1500 or 9000 are allowed
4601 	 */
4602 	ndev->min_mtu = ETH_DATA_LEN;
4603 	ndev->max_mtu = 9000;
4604 
4605 	err = register_netdev(ndev);
4606 	if (err) {
4607 		dev_err(&pdev->dev, "net device registration failed.\n");
4608 		goto cleanup_pdev;
4609 	}
4610 
4611 	err = qlge_health_create_reporters(qdev);
4612 	if (err)
4613 		goto unregister_netdev;
4614 
4615 	/* Start up the timer to trigger EEH if
4616 	 * the bus goes dead
4617 	 */
4618 	timer_setup(&qdev->timer, qlge_timer, TIMER_DEFERRABLE);
4619 	mod_timer(&qdev->timer, jiffies + (5 * HZ));
4620 	qlge_link_off(qdev);
4621 	qlge_display_dev_info(ndev);
4622 	atomic_set(&qdev->lb_count, 0);
4623 	cards_found++;
4624 	devlink_register(devlink);
4625 	return 0;
4626 
4627 unregister_netdev:
4628 	unregister_netdev(ndev);
4629 cleanup_pdev:
4630 	qlge_release_all(pdev);
4631 	pci_disable_device(pdev);
4632 netdev_free:
4633 	free_netdev(ndev);
4634 devlink_free:
4635 	devlink_free(devlink);
4636 
4637 	return err;
4638 }
4639 
4640 netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev)
4641 {
4642 	return qlge_send(skb, ndev);
4643 }
4644 
4645 int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4646 {
4647 	return qlge_clean_inbound_rx_ring(rx_ring, budget);
4648 }
4649 
4650 static void qlge_remove(struct pci_dev *pdev)
4651 {
4652 	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4653 	struct net_device *ndev = qdev->ndev;
4654 	struct devlink *devlink = priv_to_devlink(qdev);
4655 
4656 	devlink_unregister(devlink);
4657 	del_timer_sync(&qdev->timer);
4658 	qlge_cancel_all_work_sync(qdev);
4659 	unregister_netdev(ndev);
4660 	qlge_release_all(pdev);
4661 	pci_disable_device(pdev);
4662 	devlink_health_reporter_destroy(qdev->reporter);
4663 	devlink_free(devlink);
4664 	free_netdev(ndev);
4665 }
4666 
4667 /* Clean up resources without touching hardware. */
4668 static void qlge_eeh_close(struct net_device *ndev)
4669 {
4670 	struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4671 	int i;
4672 
4673 	if (netif_carrier_ok(ndev)) {
4674 		netif_carrier_off(ndev);
4675 		netif_stop_queue(ndev);
4676 	}
4677 
4678 	/* Cancel all outstanding work. */
4679 	qlge_cancel_all_work_sync(qdev);
4680 
4681 	for (i = 0; i < qdev->rss_ring_count; i++)
4682 		netif_napi_del(&qdev->rx_ring[i].napi);
4683 
4684 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
4685 	qlge_tx_ring_clean(qdev);
4686 	qlge_free_rx_buffers(qdev);
4687 	qlge_release_adapter_resources(qdev);
4688 }
4689 
4690 /*
4691  * This callback is called by the PCI subsystem whenever
4692  * a PCI bus error is detected.
4693  */
4694 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4695 					       pci_channel_state_t state)
4696 {
4697 	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4698 	struct net_device *ndev = qdev->ndev;
4699 
4700 	switch (state) {
4701 	case pci_channel_io_normal:
4702 		return PCI_ERS_RESULT_CAN_RECOVER;
4703 	case pci_channel_io_frozen:
4704 		netif_device_detach(ndev);
4705 		del_timer_sync(&qdev->timer);
4706 		if (netif_running(ndev))
4707 			qlge_eeh_close(ndev);
4708 		pci_disable_device(pdev);
4709 		return PCI_ERS_RESULT_NEED_RESET;
4710 	case pci_channel_io_perm_failure:
4711 		dev_err(&pdev->dev,
4712 			"%s: pci_channel_io_perm_failure.\n", __func__);
4713 		del_timer_sync(&qdev->timer);
4714 		qlge_eeh_close(ndev);
4715 		set_bit(QL_EEH_FATAL, &qdev->flags);
4716 		return PCI_ERS_RESULT_DISCONNECT;
4717 	}
4718 
4719 	/* Request a slot reset. */
4720 	return PCI_ERS_RESULT_NEED_RESET;
4721 }
4722 
4723 /*
4724  * This callback is called after the PCI bus has been reset.
4725  * Basically, this tries to restart the card from scratch.
4726  * This is a shortened version of the device probe/discovery code,
4727  * it resembles the first half of the qlge_probe() routine.
4728  */
4729 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4730 {
4731 	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4732 
4733 	pdev->error_state = pci_channel_io_normal;
4734 
4735 	pci_restore_state(pdev);
4736 	if (pci_enable_device(pdev)) {
4737 		netif_err(qdev, ifup, qdev->ndev,
4738 			  "Cannot re-enable PCI device after reset.\n");
4739 		return PCI_ERS_RESULT_DISCONNECT;
4740 	}
4741 	pci_set_master(pdev);
4742 
4743 	if (qlge_adapter_reset(qdev)) {
4744 		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4745 		set_bit(QL_EEH_FATAL, &qdev->flags);
4746 		return PCI_ERS_RESULT_DISCONNECT;
4747 	}
4748 
4749 	return PCI_ERS_RESULT_RECOVERED;
4750 }
4751 
4752 static void qlge_io_resume(struct pci_dev *pdev)
4753 {
4754 	struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4755 	struct net_device *ndev = qdev->ndev;
4756 	int err = 0;
4757 
4758 	if (netif_running(ndev)) {
4759 		err = qlge_open(ndev);
4760 		if (err) {
4761 			netif_err(qdev, ifup, qdev->ndev,
4762 				  "Device initialization failed after reset.\n");
4763 			return;
4764 		}
4765 	} else {
4766 		netif_err(qdev, ifup, qdev->ndev,
4767 			  "Device was not running prior to EEH.\n");
4768 	}
4769 	mod_timer(&qdev->timer, jiffies + (5 * HZ));
4770 	netif_device_attach(ndev);
4771 }
4772 
4773 static const struct pci_error_handlers qlge_err_handler = {
4774 	.error_detected = qlge_io_error_detected,
4775 	.slot_reset = qlge_io_slot_reset,
4776 	.resume = qlge_io_resume,
4777 };
4778 
4779 static int __maybe_unused qlge_suspend(struct device *dev_d)
4780 {
4781 	struct pci_dev *pdev = to_pci_dev(dev_d);
4782 	struct qlge_adapter *qdev;
4783 	struct net_device *ndev;
4784 	int err;
4785 
4786 	qdev = pci_get_drvdata(pdev);
4787 	ndev = qdev->ndev;
4788 	netif_device_detach(ndev);
4789 	del_timer_sync(&qdev->timer);
4790 
4791 	if (netif_running(ndev)) {
4792 		err = qlge_adapter_down(qdev);
4793 		if (err)
4794 			return err;
4795 	}
4796 
4797 	qlge_wol(qdev);
4798 
4799 	return 0;
4800 }
4801 
4802 static int __maybe_unused qlge_resume(struct device *dev_d)
4803 {
4804 	struct pci_dev *pdev = to_pci_dev(dev_d);
4805 	struct qlge_adapter *qdev;
4806 	struct net_device *ndev;
4807 	int err;
4808 
4809 	qdev = pci_get_drvdata(pdev);
4810 	ndev = qdev->ndev;
4811 
4812 	pci_set_master(pdev);
4813 
4814 	device_wakeup_disable(dev_d);
4815 
4816 	if (netif_running(ndev)) {
4817 		err = qlge_adapter_up(qdev);
4818 		if (err)
4819 			return err;
4820 	}
4821 
4822 	mod_timer(&qdev->timer, jiffies + (5 * HZ));
4823 	netif_device_attach(ndev);
4824 
4825 	return 0;
4826 }
4827 
4828 static void qlge_shutdown(struct pci_dev *pdev)
4829 {
4830 	qlge_suspend(&pdev->dev);
4831 }
4832 
4833 static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);
4834 
4835 static struct pci_driver qlge_driver = {
4836 	.name = DRV_NAME,
4837 	.id_table = qlge_pci_tbl,
4838 	.probe = qlge_probe,
4839 	.remove = qlge_remove,
4840 	.driver.pm = &qlge_pm_ops,
4841 	.shutdown = qlge_shutdown,
4842 	.err_handler = &qlge_err_handler
4843 };
4844 
4845 module_pci_driver(qlge_driver);
4846