/*******************************************************************************

  Intel PRO/10GbE Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ixgb.h"

char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

#define DRIVERNAPI "-NAPI"
#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
const char ixgb_driver_version[] = DRV_VERSION;
static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";

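/* Receive "copybreak": frames no longer than this are copied into a small
 * freshly allocated skb by the receive cleanup path so the (much larger)
 * mapped DMA buffer can be recycled; larger frames are handed to the stack
 * in the original receive buffer.
 */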
#define IXGB_CB_LENGTH 256
static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);

/* Local Function Prototypes */
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(unsigned long data);
static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev);
static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);

static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);

static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct work_struct *work);

static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif

static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
                             enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
static void ixgb_io_resume (struct pci_dev *pdev);

static struct pci_error_handlers ixgb_err_handler = {
	.error_detected = ixgb_io_error_detected,
	.slot_reset = ixgb_io_slot_reset,
	.resume = ixgb_io_resume,
};

static struct pci_driver ixgb_driver = {
	.name     = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe    = ixgb_probe,
	.remove   = __devexit_p(ixgb_remove),
	.err_handler = &ixgb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

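/* "debug" selects how many of the low netif message-level bits to enable
 * (0 disables all output); out-of-range values fall back to the default
 * mask.  See netif_msg_init().
 */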
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
ixgb_init_module(void)
{
	pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
	pr_info("%s\n", ixgb_copyright);

	return pci_register_driver(&ixgb_driver);
}

module_init(ixgb_init_module);

/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
ixgb_exit_module(void)
{
	pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);

/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	IXGB_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
	u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
		  IXGB_INT_TXDW | IXGB_INT_LSC;
	if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
		val |= IXGB_INT_GPI0;
	IXGB_WRITE_REG(&adapter->hw, IMS, val);
	IXGB_WRITE_FLUSH(&adapter->hw);
}

int
ixgb_up(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err, irq_flags = IRQF_SHARED;
	int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	struct ixgb_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */

	ixgb_rar_set(hw, netdev->dev_addr, 0);
	ixgb_set_multi(netdev);

	ixgb_restore_vlan(adapter);

	ixgb_configure_tx(adapter);
	ixgb_setup_rctl(adapter);
	ixgb_configure_rx(adapter);
	ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));

	/* disable interrupts and get the hardware into a known state */
	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

	/* only enable MSI if bus is in PCI-X mode */
	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->have_msi = 1;
			irq_flags = 0;
		}
		/* proceed to try to request regular interrupt */
	}

	err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
	                  netdev->name, netdev);
	if (err) {
		if (adapter->have_msi)
			pci_disable_msi(adapter->pdev);
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate interrupt Error: %d\n", err);
		return err;
	}

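	/* MFS carries the maximum frame size in its upper bits, hence the
	 * IXGB_MFS_SHIFT on reads and writes.  Frames beyond the standard
	 * Ethernet maximum additionally require the jumbo frame enable
	 * (JFE) bit in CTRL0, set below.
	 */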
	if ((hw->max_frame_size != max_frame) ||
		(hw->max_frame_size !=
		(IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

		hw->max_frame_size = max_frame;

		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

		if (hw->max_frame_size >
		   IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
			u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

			if (!(ctrl0 & IXGB_CTRL0_JFE)) {
				ctrl0 |= IXGB_CTRL0_JFE;
				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
			}
		}
	}

	clear_bit(__IXGB_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);
	ixgb_irq_enable(adapter);

	netif_wake_queue(netdev);

	mod_timer(&adapter->watchdog_timer, jiffies);

	return 0;
}

void
ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
	struct net_device *netdev = adapter->netdev;

	/* prevent the interrupt handler from restarting watchdog */
	set_bit(__IXGB_DOWN, &adapter->flags);

	napi_disable(&adapter->napi);
	/* waiting for NAPI to complete can re-enable interrupts */
	ixgb_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);

	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);

	if (kill_watchdog)
		del_timer_sync(&adapter->watchdog_timer);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	ixgb_reset(adapter);
	ixgb_clean_tx_ring(adapter);
	ixgb_clean_rx_ring(adapter);
}

void
ixgb_reset(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;

	ixgb_adapter_stop(hw);
	if (!ixgb_init_hw(hw))
		netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");

	/* restore frame size information */
	IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
	if (hw->max_frame_size >
	    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
		u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
		if (!(ctrl0 & IXGB_CTRL0_JFE)) {
			ctrl0 |= IXGB_CTRL0_JFE;
			IXGB_WRITE_REG(hw, CTRL0, ctrl0);
		}
	}
}

static const struct net_device_ops ixgb_netdev_ops = {
	.ndo_open 		= ixgb_open,
	.ndo_stop		= ixgb_close,
	.ndo_start_xmit		= ixgb_xmit_frame,
	.ndo_get_stats		= ixgb_get_stats,
	.ndo_set_multicast_list	= ixgb_set_multi,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgb_set_mac,
	.ndo_change_mtu		= ixgb_change_mtu,
	.ndo_tx_timeout		= ixgb_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgb_netpoll,
#endif
};

/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct ixgb_adapter *adapter;
	static int cards_found = 0;
	int pci_using_dac;
	int i;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

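	/* Prefer a 64-bit DMA mask (and matching coherent mask) so buffers
	 * may live in high memory; fall back to 32-bit addressing if the
	 * platform cannot provide it.  pci_using_dac records which mode we
	 * ended up in so the feature flags can be set accordingly.
	 */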
	pci_using_dac = 0;
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				pr_err("No usable DMA configuration, aborting\n");
				goto err_dma_mask;
			}
		}
	}

	err = pci_request_regions(pdev, ixgb_driver_name);
	if (err)
		goto err_request_regions;

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);

	adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

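	/* BAR 0 holds the memory-mapped register space; scan the remaining
	 * BARs for an optional I/O port mapping.
	 */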
	for (i = BAR_1; i <= BAR_5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	netdev->netdev_ops = &ixgb_netdev_ops;
	ixgb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* setup the private structure */

	err = ixgb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;
	netdev->features |= NETIF_F_TSO;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* make sure the EEPROM is good */

	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		netif_err(adapter, probe, adapter->netdev,
			  "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgb_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	netif_info(adapter, probe, adapter->netdev,
		   "Intel(R) PRO/10GbE Network Connection\n");
	ixgb_check_options(adapter);
	/* reset the hardware with the new settings */

	ixgb_reset(adapter);

	cards_found++;
	return 0;

err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
ixgb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	unregister_netdev(netdev);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);
	pci_disable_device(pdev);
}

/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit
ixgb_sw_init(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */

	if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
		hw->mac_type = ixgb_82597;
	else {
		/* should never have loaded on this device */
		netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
	}

	/* enable flow control to be programmed */
	hw->fc.send_xon = 1;

	set_bit(__IXGB_DOWN, &adapter->flags);
	return 0;
}

/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
ixgb_open(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = ixgb_setup_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	netif_carrier_off(netdev);

	/* allocate receive descriptors */

	err = ixgb_setup_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	err = ixgb_up(adapter);
	if (err)
		goto err_up;

	netif_start_queue(netdev);

	return 0;

err_up:
	ixgb_free_rx_resources(adapter);
err_setup_rx:
	ixgb_free_tx_resources(adapter);
err_setup_tx:
	ixgb_reset(adapter);

	return err;
}

/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
ixgb_close(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	ixgb_down(adapter, true);

	ixgb_free_tx_resources(adapter);
	ixgb_free_rx_resources(adapter);

	return 0;
}

/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info) {
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate transmit descriptor ring memory\n");
		return -ENOMEM;
	}

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
		vfree(txdr->buffer_info);
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate transmit descriptor memory\n");
		return -ENOMEM;
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
	u64 tdba = adapter->tx_ring.dma;
	u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
	u32 tctl;
	struct ixgb_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring
	 * tx_ring.dma can be either a 32 or 64 bit value
	 */

	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

	IXGB_WRITE_REG(hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */

	IXGB_WRITE_REG(hw, TDH, 0);
	IXGB_WRITE_REG(hw, TDT, 0);

	/* don't set up txdctl, it induces performance problems if configured
	 * incorrectly */
	/* Set the Tx Interrupt Delay register */

	IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

	/* Program the Transmit Control Register */

	tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(hw, TCTL, tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->tx_cmd_type =
		IXGB_TX_DESC_TYPE |
		(adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}

/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info) {
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate receive descriptor ring\n");
		return -ENOMEM;
	}

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);

	if (!rxdr->desc) {
		vfree(rxdr->buffer_info);
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate receive descriptors\n");
		return -ENOMEM;
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}

/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: Board private structure
 **/

static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
	u32 rctl;

	rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

	rctl |=
		IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
		IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

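	/* strip the Ethernet CRC from received frames before they are
	 * handed to the stack */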
	rctl |= IXGB_RCTL_SECRC;

	if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
		rctl |= IXGB_RCTL_BSIZE_2048;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
		rctl |= IXGB_RCTL_BSIZE_4096;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
		rctl |= IXGB_RCTL_BSIZE_8192;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
		rctl |= IXGB_RCTL_BSIZE_16384;

	IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
	u64 rdba = adapter->rx_ring.dma;
	u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
	struct ixgb_hw *hw = &adapter->hw;
	u32 rctl;
	u32 rxcsum;

	/* make sure receives are disabled while setting up the descriptors */

	rctl = IXGB_READ_REG(hw, RCTL);
	IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

	/* set the Receive Delay Timer Register */

	IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */

	IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

	IXGB_WRITE_REG(hw, RDLEN, rdlen);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(hw, RDH, 0);
	IXGB_WRITE_REG(hw, RDT, 0);

	/* due to the hardware errata with RXDCTL, we are unable to use any of
	 * the performance enhancing features of it without causing other
	 * subtle bugs, some of the bugs could include receive length
	 * corruption at high data rates (WTHRESH > 0) and/or receive
	 * descriptor ring irregularities (particularly in hardware cache) */
	IXGB_WRITE_REG(hw, RXDCTL, 0);

	/* Enable Receive Checksum Offload for TCP and UDP */
	if (adapter->rx_csum) {
		rxcsum = IXGB_READ_REG(hw, RXCSUM);
		rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* Enable Receives */

	IXGB_WRITE_REG(hw, RCTL, rctl);
}

/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_tx_ring(adapter);

	vfree(adapter->tx_ring.buffer_info);
	adapter->tx_ring.buffer_info = NULL;

	dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
			  adapter->tx_ring.desc, adapter->tx_ring.dma);

	adapter->tx_ring.desc = NULL;
}

static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
                                struct ixgb_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}

	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* these fields must always be initialized in tx
	 * buffer_info->length = 0;
	 * buffer_info->next_to_watch = 0; */
}

/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct ixgb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}

/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_rx_ring(adapter);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct ixgb_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(&pdev->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			buffer_info->length = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct ixgb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
	IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}

/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

	return 0;
}

/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
ixgb_set_multi(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u32 rctl;
	int i;

	/* Check for Promiscuous and All Multicast modes */

	rctl = IXGB_READ_REG(hw, RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		/* disable VLAN filtering */
		rctl &= ~IXGB_RCTL_CFIEN;
		rctl &= ~IXGB_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= IXGB_RCTL_MPE;
			rctl &= ~IXGB_RCTL_UPE;
		} else {
			rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		}
		/* enable VLAN filtering */
		rctl |= IXGB_RCTL_VFE;
		rctl &= ~IXGB_RCTL_CFIEN;
	}

	if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
		rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(hw, RCTL, rctl);
	} else {
		u8 mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES *
			    IXGB_ETH_LENGTH_OF_ADDRESS];

		IXGB_WRITE_REG(hw, RCTL, rctl);

		i = 0;
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS],
			       ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS);

		ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
	}

	if (netdev->features & NETIF_F_HW_VLAN_RX)
		ixgb_vlan_strip_enable(adapter);
	else
		ixgb_vlan_strip_disable(adapter);

}

/**
 * ixgb_watchdog - Timer Call-back
 * @data: pointer to our adapter cast into an unsigned long
 **/

static void
ixgb_watchdog(unsigned long data)
{
	struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;

	ixgb_check_for_link(&adapter->hw);

	if (ixgb_check_for_bad_link(&adapter->hw)) {
		/* force the reset path */
		netif_stop_queue(netdev);
	}

	if (adapter->hw.link_up) {
		if (!netif_carrier_ok(netdev)) {
			netdev_info(netdev,
				    "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
				    (adapter->hw.fc.type == ixgb_fc_full) ?
				    "RX/TX" :
				    (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
				     "RX" :
				    (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
				    "TX" : "None");
			adapter->link_speed = 10000;
			adapter->link_duplex = FULL_DUPLEX;
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			netdev_info(netdev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
		}
	}

	ixgb_update_stats(adapter);

	if (!netif_carrier_ok(netdev)) {
		if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			schedule_work(&adapter->tx_timeout_task);
			/* return immediately since reset is imminent */
			return;
		}
	}

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* generate an interrupt to force clean up of any stragglers */
	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

#define IXGB_TX_FLAGS_CSUM		0x00000001
#define IXGB_TX_FLAGS_VLAN		0x00000002
#define IXGB_TX_FLAGS_TSO		0x00000004

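/* ixgb_tso - set up a TSO context descriptor for a GSO skb.
 * Returns 1 if a context descriptor was queued, 0 if the skb does not
 * need TSO, or a negative errno if the cloned header could not be
 * expanded.
 */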
static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	u16 ipcse, tucse, mss;
	int err;

	if (likely(skb_is_gso(skb))) {
		struct ixgb_buffer *buffer_info;
		struct iphdr *iph;

		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(iph->check) - (void *)skb->data;
		ipcse = skb_transport_offset(skb) - 1;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->ipcss = ipcss;
		context_desc->ipcso = ipcso;
		context_desc->ipcse = cpu_to_le16(ipcse);
		context_desc->tucss = tucss;
		context_desc->tucso = tucso;
		context_desc->tucse = cpu_to_le16(tucse);
		context_desc->mss = cpu_to_le16(mss);
		context_desc->hdr_len = hdr_len;
		context_desc->status = 0;
		context_desc->cmd_type_len = cpu_to_le32(
						  IXGB_CONTEXT_DESC_TYPE
						| IXGB_CONTEXT_DESC_CMD_TSE
						| IXGB_CONTEXT_DESC_CMD_IP
						| IXGB_CONTEXT_DESC_CMD_TCP
						| IXGB_CONTEXT_DESC_CMD_IDE
						| (skb->len - (hdr_len)));


		if (++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return 1;
	}

	return 0;
}

static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct ixgb_buffer *buffer_info;
		css = skb_checksum_start_offset(skb);
		cso = css + skb->csum_offset;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->tucss = css;
		context_desc->tucso = cso;
		context_desc->tucse = 0;
		/* zero out any previously existing data in one instruction */
		*(u32 *)&(context_desc->ipcss) = 0;
		context_desc->status = 0;
		context_desc->hdr_len = 0;
		context_desc->mss = 0;
		context_desc->cmd_type_len =
			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
				    | IXGB_TX_DESC_CMD_IDE);

		if (++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return true;
	}

	return false;
}

#define IXGB_MAX_TXD_PWR	14
#define IXGB_MAX_DATA_PER_TXD	(1<<IXGB_MAX_TXD_PWR)

static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
	    unsigned int first)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_buffer *buffer_info;
	int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, IXGB_MAX_DATA_PER_TXD);
		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;

		buffer_info->length = size;
		WARN_ON(buffer_info->dma != 0);
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = 0;

		len -= size;
		offset += size;
		count++;
		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, IXGB_MAX_DATA_PER_TXD);

			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if (unlikely(mss && (f == (nr_frags - 1))
				     && size == len && size > 8))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma =
				dma_map_page(&pdev->dev, frag->page,
					     offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = 0;

			len -= size;
			offset += size;
			count++;
		}
	}
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

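	/* walk back over the descriptors mapped so far and release them */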
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}

static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_tx_desc *tx_desc = NULL;
	struct ixgb_buffer *buffer_info;
	u32 cmd_type_len = adapter->tx_cmd_type;
	u8 status = 0;
	u8 popts = 0;
	unsigned int i;

	if (tx_flags & IXGB_TX_FLAGS_TSO) {
		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
	}

	if (tx_flags & IXGB_TX_FLAGS_CSUM)
		popts |= IXGB_TX_DESC_POPTS_TXSM;

	if (tx_flags & IXGB_TX_FLAGS_VLAN)
		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IXGB_TX_DESC(*tx_ring, i);
		tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->status = status;
		tx_desc->popts = popts;
		tx_desc->vlan = cpu_to_le16(vlan_id);

		if (++i == tx_ring->count) i = 0;
	}

	tx_desc->cmd_type_len |=
		cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	IXGB_WRITE_REG(&adapter->hw, TDT, i);
}

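/* The transmit path checks ring occupancy with ixgb_maybe_stop_tx(); the
 * fast path returns immediately while there is room.  The slow path below
 * stops the queue and re-checks after a memory barrier to close the race
 * with ixgb_clean_tx_irq() freeing descriptors on another CPU.
 */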
static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int ixgb_maybe_stop_tx(struct net_device *netdev,
                              struct ixgb_desc_ring *tx_ring, int size)
{
	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgb_maybe_stop_tx(netdev, size);
}


/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
			 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
	+ 1 /* one more needed for sentinel TSO workaround */
1483 
1484 static netdev_tx_t
ixgb_xmit_frame(struct sk_buff * skb,struct net_device * netdev)1485 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1486 {
1487 	struct ixgb_adapter *adapter = netdev_priv(netdev);
1488 	unsigned int first;
1489 	unsigned int tx_flags = 0;
1490 	int vlan_id = 0;
1491 	int count = 0;
1492 	int tso;
1493 
1494 	if (test_bit(__IXGB_DOWN, &adapter->flags)) {
1495 		dev_kfree_skb(skb);
1496 		return NETDEV_TX_OK;
1497 	}
1498 
1499 	if (skb->len <= 0) {
1500 		dev_kfree_skb(skb);
1501 		return NETDEV_TX_OK;
1502 	}
1503 
1504 	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
1505                      DESC_NEEDED)))
1506 		return NETDEV_TX_BUSY;
1507 
1508 	if (vlan_tx_tag_present(skb)) {
1509 		tx_flags |= IXGB_TX_FLAGS_VLAN;
1510 		vlan_id = vlan_tx_tag_get(skb);
1511 	}
1512 
1513 	first = adapter->tx_ring.next_to_use;
1514 
1515 	tso = ixgb_tso(adapter, skb);
1516 	if (tso < 0) {
1517 		dev_kfree_skb(skb);
1518 		return NETDEV_TX_OK;
1519 	}
1520 
1521 	if (likely(tso))
1522 		tx_flags |= IXGB_TX_FLAGS_TSO;
1523 	else if (ixgb_tx_csum(adapter, skb))
1524 		tx_flags |= IXGB_TX_FLAGS_CSUM;
1525 
1526 	count = ixgb_tx_map(adapter, skb, first);
1527 
1528 	if (count) {
1529 		ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
1530 		/* Make sure there is space in the ring for the next send. */
1531 		ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
1532 
1533 	} else {
1534 		dev_kfree_skb_any(skb);
1535 		adapter->tx_ring.buffer_info[first].time_stamp = 0;
1536 		adapter->tx_ring.next_to_use = first;
1537 	}
1538 
1539 	return NETDEV_TX_OK;
1540 }
1541 
1542 /**
1543  * ixgb_tx_timeout - Respond to a Tx Hang
1544  * @netdev: network interface device structure
1545  **/
1546 
1547 static void
ixgb_tx_timeout(struct net_device * netdev)1548 ixgb_tx_timeout(struct net_device *netdev)
1549 {
1550 	struct ixgb_adapter *adapter = netdev_priv(netdev);
1551 
1552 	/* Do the reset outside of interrupt context */
1553 	schedule_work(&adapter->tx_timeout_task);
1554 }
1555 
1556 static void
ixgb_tx_timeout_task(struct work_struct * work)1557 ixgb_tx_timeout_task(struct work_struct *work)
1558 {
1559 	struct ixgb_adapter *adapter =
1560 		container_of(work, struct ixgb_adapter, tx_timeout_task);
1561 
1562 	adapter->tx_timeout_count++;
1563 	ixgb_down(adapter, true);
1564 	ixgb_up(adapter);
1565 }
1566 
1567 /**
1568  * ixgb_get_stats - Get System Network Statistics
1569  * @netdev: network interface device structure
1570  *
1571  * Returns the address of the device statistics structure.
1572  * The statistics are actually updated from the timer callback.
1573  **/
1574 
1575 static struct net_device_stats *
ixgb_get_stats(struct net_device * netdev)1576 ixgb_get_stats(struct net_device *netdev)
1577 {
1578 	return &netdev->stats;
1579 }
1580 
1581 /**
1582  * ixgb_change_mtu - Change the Maximum Transfer Unit
1583  * @netdev: network interface device structure
1584  * @new_mtu: new value for maximum frame size
1585  *
1586  * Returns 0 on success, negative on failure
1587  **/
1588 
1589 static int
ixgb_change_mtu(struct net_device * netdev,int new_mtu)1590 ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1591 {
1592 	struct ixgb_adapter *adapter = netdev_priv(netdev);
1593 	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1594 	int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1595 
1596 	/* MTU < 68 is an error for IPv4 traffic, just don't allow it */
1597 	if ((new_mtu < 68) ||
1598 	    (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
1599 		netif_err(adapter, probe, adapter->netdev,
1600 			  "Invalid MTU setting %d\n", new_mtu);
1601 		return -EINVAL;
1602 	}
1603 
1604 	if (old_max_frame == max_frame)
1605 		return 0;
1606 
1607 	if (netif_running(netdev))
1608 		ixgb_down(adapter, true);
1609 
1610 	adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */
1611 
1612 	netdev->mtu = new_mtu;
1613 
1614 	if (netif_running(netdev))
1615 		ixgb_up(adapter);
1616 
1617 	return 0;
1618 }
1619 
1620 /**
1621  * ixgb_update_stats - Update the board statistics counters.
1622  * @adapter: board private structure
1623  **/
1624 
1625 void
ixgb_update_stats(struct ixgb_adapter * adapter)1626 ixgb_update_stats(struct ixgb_adapter *adapter)
1627 {
1628 	struct net_device *netdev = adapter->netdev;
1629 	struct pci_dev *pdev = adapter->pdev;
1630 
1631 	/* Prevent stats update while adapter is being reset */
1632 	if (pci_channel_offline(pdev))
1633 		return;
1634 
1635 	if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1636 	   (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
1637 		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1638 		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1639 		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
1640 		u64 bcast = ((u64)bcast_h << 32) | bcast_l;
1641 
1642 		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1643 		/* fix up multicast stats by removing broadcasts */
1644 		if (multi >= bcast)
1645 			multi -= bcast;
1646 
1647 		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1648 		adapter->stats.mprch += (multi >> 32);
1649 		adapter->stats.bprcl += bcast_l;
1650 		adapter->stats.bprch += bcast_h;
1651 	} else {
1652 		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1653 		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1654 		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1655 		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1656 	}
1657 	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
1658 	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
1659 	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
1660 	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
1661 	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
1662 	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
1663 	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
1664 	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
1665 	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
1666 	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
1667 	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
1668 	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
1669 	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
1670 	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
1671 	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
1672 	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
1673 	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
1674 	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
1675 	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
1676 	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
1677 	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
1678 	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
1679 	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
1680 	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
1681 	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
1682 	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
1683 	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
1684 	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
1685 	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
1686 	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
1687 	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
1688 	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
1689 	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
1690 	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
1691 	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
1692 	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
1693 	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
1694 	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
1695 	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
1696 	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
1697 	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
1698 	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
1699 	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
1700 	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
1701 	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
1702 	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
1703 	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
1704 	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
1705 	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
1706 	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
1707 	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
1708 	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
1709 	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
1710 	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
1711 	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
1712 	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
1713 
1714 	/* Fill out the OS statistics structure */
1715 
1716 	netdev->stats.rx_packets = adapter->stats.gprcl;
1717 	netdev->stats.tx_packets = adapter->stats.gptcl;
1718 	netdev->stats.rx_bytes = adapter->stats.gorcl;
1719 	netdev->stats.tx_bytes = adapter->stats.gotcl;
1720 	netdev->stats.multicast = adapter->stats.mprcl;
1721 	netdev->stats.collisions = 0;
1722 
1723 	/* ignore RLEC as it reports errors for padded (<64bytes) frames
1724 	 * with a length in the type/len field */
1725 	netdev->stats.rx_errors =
1726 	    /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1727 	    adapter->stats.ruc +
1728 	    adapter->stats.roc /*+ adapter->stats.rlec */  +
1729 	    adapter->stats.icbc +
1730 	    adapter->stats.ecbc + adapter->stats.mpc;
1731 
1732 	/* see above
1733 	 * netdev->stats.rx_length_errors = adapter->stats.rlec;
1734 	 */
1735 
1736 	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
1737 	netdev->stats.rx_fifo_errors = adapter->stats.mpc;
1738 	netdev->stats.rx_missed_errors = adapter->stats.mpc;
1739 	netdev->stats.rx_over_errors = adapter->stats.mpc;
1740 
1741 	netdev->stats.tx_errors = 0;
1742 	netdev->stats.rx_frame_errors = 0;
1743 	netdev->stats.tx_aborted_errors = 0;
1744 	netdev->stats.tx_carrier_errors = 0;
1745 	netdev->stats.tx_fifo_errors = 0;
1746 	netdev->stats.tx_heartbeat_errors = 0;
1747 	netdev->stats.tx_window_errors = 0;
1748 }
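
/*
 * Each MAC statistic above is kept by hardware as a 64-bit counter split
 * across a low/high register pair (e.g. GORCL/GORCH for good octets
 * received); the "+=" accumulation reflects counters that appear to clear
 * on read, as on other Intel MACs.  The netdev->stats fields filled in at
 * the end are what userspace sees through /proc/net/dev and "ip -s link".
 */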
1749 
1750 #define IXGB_MAX_INTR 10
1751 /**
1752  * ixgb_intr - Interrupt Handler
1753  * @irq: interrupt number
1754  * @data: pointer to a network interface device structure
1755  **/
1756 
1757 static irqreturn_t
1758 ixgb_intr(int irq, void *data)
1759 {
1760 	struct net_device *netdev = data;
1761 	struct ixgb_adapter *adapter = netdev_priv(netdev);
1762 	struct ixgb_hw *hw = &adapter->hw;
1763 	u32 icr = IXGB_READ_REG(hw, ICR);
1764 
1765 	if (unlikely(!icr))
1766 		return IRQ_NONE;  /* Not our interrupt */
1767 
1768 	if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
1769 		if (!test_bit(__IXGB_DOWN, &adapter->flags))
1770 			mod_timer(&adapter->watchdog_timer, jiffies);
1771 
1772 	if (napi_schedule_prep(&adapter->napi)) {
1773 
1774 		/* Disable interrupts and register for poll.  The flush
1775 		 * of the posted write is intentionally left out.
1776 		 */
1777 
1778 		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1779 		__napi_schedule(&adapter->napi);
1780 	}
1781 	return IRQ_HANDLED;
1782 }
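
/*
 * ixgb_intr() follows the usual NAPI shape.  ICR is read-to-clear, so a
 * zero value means another device sharing the line raised the interrupt.
 * Writing ~0 to IMC masks all interrupt causes before the poll is
 * scheduled; ixgb_clean() re-enables them through ixgb_irq_enable() once
 * the ring work is done.
 */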
1783 
1784 /**
1785  * ixgb_clean - NAPI Rx polling callback
1786  * @napi: NAPI structure (embedded in the board private structure)
 * @budget: maximum number of packets to process in this poll
1787  **/
1788 
1789 static int
1790 ixgb_clean(struct napi_struct *napi, int budget)
1791 {
1792 	struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
1793 	int work_done = 0;
1794 
1795 	ixgb_clean_tx_irq(adapter);
1796 	ixgb_clean_rx_irq(adapter, &work_done, budget);
1797 
1798 	/* If budget not fully consumed, exit the polling mode */
1799 	if (work_done < budget) {
1800 		napi_complete(napi);
1801 		if (!test_bit(__IXGB_DOWN, &adapter->flags))
1802 			ixgb_irq_enable(adapter);
1803 	}
1804 
1805 	return work_done;
1806 }
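
/*
 * The NAPI contract here: returning less than "budget" signals the poll
 * finished its work, so napi_complete() is called and interrupts come
 * back on; returning exactly "budget" leaves the device on the poll list
 * and the core will call ixgb_clean() again.
 */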
1807 
1808 /**
1809  * ixgb_clean_tx_irq - Reclaim resources after transmit completes
1810  * @adapter: board private structure
1811  **/
1812 
1813 static bool
1814 ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1815 {
1816 	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1817 	struct net_device *netdev = adapter->netdev;
1818 	struct ixgb_tx_desc *tx_desc, *eop_desc;
1819 	struct ixgb_buffer *buffer_info;
1820 	unsigned int i, eop;
1821 	bool cleaned = false;
1822 
1823 	i = tx_ring->next_to_clean;
1824 	eop = tx_ring->buffer_info[i].next_to_watch;
1825 	eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1826 
1827 	while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1828 
1829 		rmb(); /* read buffer_info after eop_desc */
1830 		for (cleaned = false; !cleaned; ) {
1831 			tx_desc = IXGB_TX_DESC(*tx_ring, i);
1832 			buffer_info = &tx_ring->buffer_info[i];
1833 
1834 			if (tx_desc->popts &
1835 			   (IXGB_TX_DESC_POPTS_TXSM |
1836 			    IXGB_TX_DESC_POPTS_IXSM))
1837 				adapter->hw_csum_tx_good++;
1838 
1839 			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1840 
1841 			*(u32 *)&(tx_desc->status) = 0;
1842 
1843 			cleaned = (i == eop);
1844 			if (++i == tx_ring->count)
				i = 0;
1845 		}
1846 
1847 		eop = tx_ring->buffer_info[i].next_to_watch;
1848 		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1849 	}
1850 
1851 	tx_ring->next_to_clean = i;
1852 
1853 	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
1854 		     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
1855 		/* Make sure that anybody stopping the queue after this
1856 		 * sees the new next_to_clean. */
1857 		smp_mb();
1858 
1859 		if (netif_queue_stopped(netdev) &&
1860 		    !(test_bit(__IXGB_DOWN, &adapter->flags))) {
1861 			netif_wake_queue(netdev);
1862 			++adapter->restart_queue;
1863 		}
1864 	}
1865 
1866 	if (adapter->detect_tx_hung) {
1867 		/* detect a transmit hang in hardware; this serializes the
1868 		 * check with the clearing of time_stamp and the movement of i */
1869 		adapter->detect_tx_hung = false;
1870 		if (tx_ring->buffer_info[eop].time_stamp &&
1871 		   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
1872 		   && !(IXGB_READ_REG(&adapter->hw, STATUS) &
1873 		        IXGB_STATUS_TXOFF)) {
1874 			/* detected Tx unit hang */
1875 			netif_err(adapter, drv, adapter->netdev,
1876 				  "Detected Tx Unit Hang\n"
1877 				  "  TDH                  <%x>\n"
1878 				  "  TDT                  <%x>\n"
1879 				  "  next_to_use          <%x>\n"
1880 				  "  next_to_clean        <%x>\n"
1881 				  "buffer_info[next_to_clean]\n"
1882 				  "  time_stamp           <%lx>\n"
1883 				  "  next_to_watch        <%x>\n"
1884 				  "  jiffies              <%lx>\n"
1885 				  "  next_to_watch.status <%x>\n",
1886 				  IXGB_READ_REG(&adapter->hw, TDH),
1887 				  IXGB_READ_REG(&adapter->hw, TDT),
1888 				  tx_ring->next_to_use,
1889 				  tx_ring->next_to_clean,
1890 				  tx_ring->buffer_info[eop].time_stamp,
1891 				  eop,
1892 				  jiffies,
1893 				  eop_desc->status);
1894 			netif_stop_queue(netdev);
1895 		}
1896 	}
1897 
1898 	return cleaned;
1899 }
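
/*
 * On the hang check above: a Tx hang is only declared when the oldest
 * outstanding buffer was stamped more than HZ (about one second) ago and
 * STATUS.TXOFF is clear, i.e. the MAC is not merely paused by 802.3x
 * flow control.  Stopping the queue then lets the stack's Tx watchdog
 * notice the stall instead of retrying silently.
 */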
1900 
1901 /**
1902  * ixgb_rx_checksum - Receive Checksum Offload for 82597.
1903  * @adapter: board private structure
1904  * @rx_desc: receive descriptor
1905  * @skb: socket buffer with received data
1906  **/
1907 
1908 static void
1909 ixgb_rx_checksum(struct ixgb_adapter *adapter,
1910                  struct ixgb_rx_desc *rx_desc,
1911                  struct sk_buff *skb)
1912 {
1913 	/* Ignore Checksum bit is set OR
1914 	 * TCP Checksum has not been calculated
1915 	 */
1916 	if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1917 	   (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1918 		skb_checksum_none_assert(skb);
1919 		return;
1920 	}
1921 
1922 	/* At this point we know the hardware did the TCP checksum */
1923 	/* now look at the TCP checksum error bit */
1924 	if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1925 		/* let the stack verify checksum errors */
1926 		skb_checksum_none_assert(skb);
1927 		adapter->hw_csum_rx_error++;
1928 	} else {
1929 		/* TCP checksum is good */
1930 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1931 		adapter->hw_csum_rx_good++;
1932 	}
1933 }
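
/*
 * CHECKSUM_UNNECESSARY tells the stack the device already verified the
 * TCP checksum, so software verification is skipped.  On the error path
 * the skb is deliberately left as CHECKSUM_NONE rather than dropped,
 * letting the stack verify and decide; hw_csum_rx_error and
 * hw_csum_rx_good only feed the driver's own statistics.
 */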
1934 
1935 /*
1936  * copying small packets into a freshly allocated skb should improve
1937  * performance when large amounts of reassembly are being done in the stack
1938  */
1939 static void ixgb_check_copybreak(struct net_device *netdev,
1940 				 struct ixgb_buffer *buffer_info,
1941 				 u32 length, struct sk_buff **skb)
1942 {
1943 	struct sk_buff *new_skb;
1944 
1945 	if (length > copybreak)
1946 		return;
1947 
1948 	new_skb = netdev_alloc_skb_ip_align(netdev, length);
1949 	if (!new_skb)
1950 		return;
1951 
1952 	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
1953 				       (*skb)->data - NET_IP_ALIGN,
1954 				       length + NET_IP_ALIGN);
1955 	/* save the skb in buffer_info as good */
1956 	buffer_info->skb = *skb;
1957 	*skb = new_skb;
1958 }
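
/*
 * copybreak trades one small memcpy for recycling the large DMA-mapped
 * buffer (saved back into buffer_info->skb above), which helps when many
 * tiny frames arrive.  Since the parameter is registered with mode 0644,
 * it can also be tuned at runtime; hypothetical example values:
 *
 *	modprobe ixgb copybreak=128
 *	echo 256 > /sys/module/ixgb/parameters/copybreak
 *
 * Setting it to 0 effectively disables the copy path.
 */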
1959 
1960 /**
1961  * ixgb_clean_rx_irq - Send received data up the network stack
1962  * @adapter: board private structure
 * @work_done: incremented by the number of packets cleaned
 * @work_to_do: poll budget; cleaning stops once it is reached
1963  **/
1964 
1965 static bool
1966 ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1967 {
1968 	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1969 	struct net_device *netdev = adapter->netdev;
1970 	struct pci_dev *pdev = adapter->pdev;
1971 	struct ixgb_rx_desc *rx_desc, *next_rxd;
1972 	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1973 	u32 length;
1974 	unsigned int i, j;
1975 	int cleaned_count = 0;
1976 	bool cleaned = false;
1977 
1978 	i = rx_ring->next_to_clean;
1979 	rx_desc = IXGB_RX_DESC(*rx_ring, i);
1980 	buffer_info = &rx_ring->buffer_info[i];
1981 
1982 	while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1983 		struct sk_buff *skb;
1984 		u8 status;
1985 
1986 		if (*work_done >= work_to_do)
1987 			break;
1988 
1989 		(*work_done)++;
1990 		rmb();	/* read descriptor and rx_buffer_info after status DD */
1991 		status = rx_desc->status;
1992 		skb = buffer_info->skb;
1993 		buffer_info->skb = NULL;
1994 
1995 		prefetch(skb->data - NET_IP_ALIGN);
1996 
1997 		if (++i == rx_ring->count)
1998 			i = 0;
1999 		next_rxd = IXGB_RX_DESC(*rx_ring, i);
2000 		prefetch(next_rxd);
2001 
2002 		j = i + 1;
2003 		if (j == rx_ring->count)
2004 			j = 0;
2005 		next2_buffer = &rx_ring->buffer_info[j];
2006 		prefetch(next2_buffer);
2007 
2008 		next_buffer = &rx_ring->buffer_info[i];
2009 
2010 		cleaned = true;
2011 		cleaned_count++;
2012 
2013 		dma_unmap_single(&pdev->dev,
2014 				 buffer_info->dma,
2015 				 buffer_info->length,
2016 				 DMA_FROM_DEVICE);
2017 		buffer_info->dma = 0;
2018 
2019 		length = le16_to_cpu(rx_desc->length);
2020 		rx_desc->length = 0;
2021 
2022 		if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
2023 
2024 			/* All receives must fit into a single buffer */
2025 
2026 			pr_debug("Receive packet consumed multiple buffers length<%x>\n",
2027 				 length);
2028 
2029 			dev_kfree_skb_irq(skb);
2030 			goto rxdesc_done;
2031 		}
2032 
2033 		if (unlikely(rx_desc->errors &
2034 		    (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
2035 		     IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
2036 			dev_kfree_skb_irq(skb);
2037 			goto rxdesc_done;
2038 		}
2039 
2040 		ixgb_check_copybreak(netdev, buffer_info, length, &skb);
2041 
2042 		/* Good Receive */
2043 		skb_put(skb, length);
2044 
2045 		/* Receive Checksum Offload */
2046 		ixgb_rx_checksum(adapter, rx_desc, skb);
2047 
2048 		skb->protocol = eth_type_trans(skb, netdev);
2049 		if (status & IXGB_RX_DESC_STATUS_VP)
2050 			__vlan_hwaccel_put_tag(skb,
2051 					       le16_to_cpu(rx_desc->special));
2052 
2053 		netif_receive_skb(skb);
2054 
2055 rxdesc_done:
2056 		/* clean up descriptor, might be written over by hw */
2057 		rx_desc->status = 0;
2058 
2059 		/* return some buffers to hardware, one at a time is too slow */
2060 		if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
2061 			ixgb_alloc_rx_buffers(adapter, cleaned_count);
2062 			cleaned_count = 0;
2063 		}
2064 
2065 		/* use prefetched values */
2066 		rx_desc = next_rxd;
2067 		buffer_info = next_buffer;
2068 	}
2069 
2070 	rx_ring->next_to_clean = i;
2071 
2072 	cleaned_count = IXGB_DESC_UNUSED(rx_ring);
2073 	if (cleaned_count)
2074 		ixgb_alloc_rx_buffers(adapter, cleaned_count);
2075 
2076 	return cleaned;
2077 }
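
/*
 * Buffer replenishment is batched.  Inside the loop, descriptors go back
 * to hardware IXGB_RX_BUFFER_WRITE at a time (a tail write per packet
 * would cost an MMIO post each), and whatever the ring still lacks is
 * topped up once more after the loop exits.
 */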
2078 
2079 /**
2080  * ixgb_alloc_rx_buffers - Replace used receive buffers
2081  * @adapter: address of board private structure
 * @cleaned_count: number of receive buffers to replace
2082  **/
2083 
2084 static void
2085 ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
2086 {
2087 	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
2088 	struct net_device *netdev = adapter->netdev;
2089 	struct pci_dev *pdev = adapter->pdev;
2090 	struct ixgb_rx_desc *rx_desc;
2091 	struct ixgb_buffer *buffer_info;
2092 	struct sk_buff *skb;
2093 	unsigned int i;
2094 	long cleancount;
2095 
2096 	i = rx_ring->next_to_use;
2097 	buffer_info = &rx_ring->buffer_info[i];
2098 	cleancount = IXGB_DESC_UNUSED(rx_ring);
2099 
2101 	/* leave three descriptors unused */
2102 	while (--cleancount > 2 && cleaned_count--) {
2103 		/* recycle! it's good for you */
2104 		skb = buffer_info->skb;
2105 		if (skb) {
2106 			skb_trim(skb, 0);
2107 			goto map_skb;
2108 		}
2109 
2110 		skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
2111 		if (unlikely(!skb)) {
2112 			/* Better luck next round */
2113 			adapter->alloc_rx_buff_failed++;
2114 			break;
2115 		}
2116 
2117 		buffer_info->skb = skb;
2118 		buffer_info->length = adapter->rx_buffer_len;
2119 map_skb:
2120 		buffer_info->dma = dma_map_single(&pdev->dev,
2121 		                                  skb->data,
2122 		                                  adapter->rx_buffer_len,
2123 						  DMA_FROM_DEVICE);
2124 
2125 		rx_desc = IXGB_RX_DESC(*rx_ring, i);
2126 		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
2127 		/* guarantee the DD bit is clear before h/w gets the descriptor;
2128 		 * this is the rest of the workaround for the h/w double
2129 		 * writeback. */
2130 		rx_desc->status = 0;
2131 
2133 		if (++i == rx_ring->count)
			i = 0;
2134 		buffer_info = &rx_ring->buffer_info[i];
2135 	}
2136 
2137 	if (likely(rx_ring->next_to_use != i)) {
2138 		rx_ring->next_to_use = i;
2139 		if (unlikely(i-- == 0))
2140 			i = (rx_ring->count - 1);
2141 
2142 		/* Force memory writes to complete before letting h/w
2143 		 * know there are new descriptors to fetch.  (Only
2144 		 * applicable for weak-ordered memory model archs, such
2145 		 * as IA-64). */
2146 		wmb();
2147 		IXGB_WRITE_REG(&adapter->hw, RDT, i);
2148 	}
2149 }
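
/*
 * RDT is written with the index of the last initialized descriptor (i is
 * backed up by one above), not with next_to_use, so hardware never owns a
 * descriptor the driver has not finished writing.  The wmb() orders the
 * descriptor stores ahead of that tail write on weakly ordered
 * architectures.
 */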
2150 
2151 static void
2152 ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
2153 {
2154 	u32 ctrl;
2155 
2156 	/* enable VLAN tag insert/strip */
2157 	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2158 	ctrl |= IXGB_CTRL0_VME;
2159 	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2160 }
2161 
2162 static void
2163 ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
2164 {
2165 	u32 ctrl;
2166 
2167 	/* disable VLAN tag insert/strip */
2168 	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2169 	ctrl &= ~IXGB_CTRL0_VME;
2170 	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2171 }
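
/*
 * Both helpers above are read-modify-write updates of CTRL0.  The single
 * VME bit covers VLAN tag insertion on transmit and tag stripping on
 * receive together, so the two directions cannot be toggled independently
 * on this hardware.
 */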
2172 
2173 static void
2174 ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2175 {
2176 	struct ixgb_adapter *adapter = netdev_priv(netdev);
2177 	u32 vfta, index;
2178 
2179 	/* add VID to filter table */
2180 
2181 	index = (vid >> 5) & 0x7F;
2182 	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2183 	vfta |= (1 << (vid & 0x1F));
2184 	ixgb_write_vfta(&adapter->hw, index, vfta);
2185 	set_bit(vid, adapter->active_vlans);
2186 }
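
/*
 * The VLAN filter table is an array of 128 32-bit words, one bit per
 * VLAN ID (128 * 32 = 4096 IDs).  Worked example for vid = 100:
 * index = (100 >> 5) & 0x7F = 3 and bit = 100 & 0x1F = 4, so the add
 * above performs VFTA[3] |= (1 << 4).
 */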
2187 
2188 static void
2189 ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2190 {
2191 	struct ixgb_adapter *adapter = netdev_priv(netdev);
2192 	u32 vfta, index;
2193 
2194 	/* remove VID from filter table */
2195 
2196 	index = (vid >> 5) & 0x7F;
2197 	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2198 	vfta &= ~(1 << (vid & 0x1F));
2199 	ixgb_write_vfta(&adapter->hw, index, vfta);
2200 	clear_bit(vid, adapter->active_vlans);
2201 }
2202 
2203 static void
2204 ixgb_restore_vlan(struct ixgb_adapter *adapter)
2205 {
2206 	u16 vid;
2207 
2208 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2209 		ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2210 }
2211 
2212 #ifdef CONFIG_NET_POLL_CONTROLLER
2213 /*
2214  * Polling 'interrupt' - used by things like netconsole to send skbs
2215  * without having to re-enable interrupts. It's not called while
2216  * the interrupt routine is executing.
2217  */
2218 
2219 static void ixgb_netpoll(struct net_device *dev)
2220 {
2221 	struct ixgb_adapter *adapter = netdev_priv(dev);
2222 
2223 	disable_irq(adapter->pdev->irq);
2224 	ixgb_intr(adapter->pdev->irq, dev);
2225 	enable_irq(adapter->pdev->irq);
2226 }
2227 #endif
2228 
2229 /**
2230  * ixgb_io_error_detected - called when a PCI error is detected
2231  * @pdev: pointer to PCI device with error
2232  * @state: PCI channel state after error
2233  *
2234  * This callback is called by the PCI subsystem whenever
2235  * a PCI bus error is detected.
2236  */
2237 static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
2238                                                enum pci_channel_state state)
2239 {
2240 	struct net_device *netdev = pci_get_drvdata(pdev);
2241 	struct ixgb_adapter *adapter = netdev_priv(netdev);
2242 
2243 	netif_device_detach(netdev);
2244 
2245 	if (state == pci_channel_io_perm_failure)
2246 		return PCI_ERS_RESULT_DISCONNECT;
2247 
2248 	if (netif_running(netdev))
2249 		ixgb_down(adapter, true);
2250 
2251 	pci_disable_device(pdev);
2252 
2253 	/* Request a slot reset. */
2254 	return PCI_ERS_RESULT_NEED_RESET;
2255 }
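
/*
 * These three callbacks implement the standard PCI error recovery
 * sequence.  The core calls ixgb_io_error_detected() first; returning
 * PCI_ERS_RESULT_NEED_RESET requests a slot reset, after which
 * ixgb_io_slot_reset() below re-initializes the device and
 * ixgb_io_resume() restarts traffic.  PCI_ERS_RESULT_DISCONNECT, used on
 * permanent failure, tells the core the device is lost.
 */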
2256 
2257 /**
2258  * ixgb_io_slot_reset - called after the PCI bus has been reset
2259  * @pdev: pointer to PCI device with error
2260  *
2261  * This callback is called after the PCI bus has been reset.
2262  * It restarts the card from scratch, using a shortened version of the
2263  * device probe/discovery code that resembles the first half of the
2264  * ixgb_probe() routine.
2265  */
2266 static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
2267 {
2268 	struct net_device *netdev = pci_get_drvdata(pdev);
2269 	struct ixgb_adapter *adapter = netdev_priv(netdev);
2270 
2271 	if (pci_enable_device(pdev)) {
2272 		netif_err(adapter, probe, adapter->netdev,
2273 			  "Cannot re-enable PCI device after reset\n");
2274 		return PCI_ERS_RESULT_DISCONNECT;
2275 	}
2276 
2277 	/* Perform card reset only on one instance of the card */
2278 	if (PCI_FUNC(pdev->devfn) != 0)
2279 		return PCI_ERS_RESULT_RECOVERED;
2280 
2281 	pci_set_master(pdev);
2282 
2283 	netif_carrier_off(netdev);
2284 	netif_stop_queue(netdev);
2285 	ixgb_reset(adapter);
2286 
2287 	/* Make sure the EEPROM is good */
2288 	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
2289 		netif_err(adapter, probe, adapter->netdev,
2290 			  "After reset, the EEPROM checksum is not valid\n");
2291 		return PCI_ERS_RESULT_DISCONNECT;
2292 	}
2293 	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
2294 	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2295 
2296 	if (!is_valid_ether_addr(netdev->perm_addr)) {
2297 		netif_err(adapter, probe, adapter->netdev,
2298 			  "After reset, invalid MAC address\n");
2299 		return PCI_ERS_RESULT_DISCONNECT;
2300 	}
2301 
2302 	return PCI_ERS_RESULT_RECOVERED;
2303 }
2304 
2305 /**
2306  * ixgb_io_resume - called when it's OK to resume normal operations
2307  * @pdev: pointer to PCI device with error
2308  *
2309  * The error recovery driver tells us that it's OK to resume
2310  * normal operation. The implementation resembles the second half
2311  * of the ixgb_probe() routine.
2312  */
2313 static void ixgb_io_resume(struct pci_dev *pdev)
2314 {
2315 	struct net_device *netdev = pci_get_drvdata(pdev);
2316 	struct ixgb_adapter *adapter = netdev_priv(netdev);
2317 
2318 	pci_set_master(pdev);
2319 
2320 	if (netif_running(netdev)) {
2321 		if (ixgb_up(adapter)) {
2322 			pr_err("can't bring device back up after reset\n");
2323 			return;
2324 		}
2325 	}
2326 
2327 	netif_device_attach(netdev);
2328 	mod_timer(&adapter->watchdog_timer, jiffies);
2329 }
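
/*
 * Resume finishes by re-arming the watchdog with mod_timer(..., jiffies),
 * so the timer fires on the next tick and link state and statistics are
 * refreshed right after recovery instead of a full watchdog interval
 * later.
 */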
2330 
2331 /* ixgb_main.c */
2332