/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>

#include "e1000.h"

#define DRV_EXTRAVERSION "-k"

#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
	[board_pch2lan]		= &e1000_pch2_info,
};

struct e1000_reg_info {
	u32 ofs;
	char *name;
};

#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */

#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */

static const struct e1000_reg_info e1000_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN, "RDLEN"},
	{E1000_RDH, "RDH"},
	{E1000_RDT, "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL, "RDBAL"},
	{E1000_RDBAH, "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL, "TDBAL"},
	{E1000_TDBAH, "TDBAH"},
	{E1000_TDLEN, "TDLEN"},
	{E1000_TDH, "TDH"},
	{E1000_TDT, "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{0, NULL}
};

/*
 * e1000_regdump - register printout routine
 */
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		pr_info("%-15s %08x\n",
			reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}
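
/*
 * Illustrative output only: for a per-queue register family such as RXDCTL
 * the routine above reads both queue instances and prints them on one line,
 * e.g.
 *
 *   RXDCTL[0-1]     02010000 00000000
 *
 * while a plain register such as CTRL is printed as a single name/value
 * pair via the default case.
 */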

/*
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 */
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	struct my_u0 {
		__le64 a;
		__le64 b;
	} *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	union e1000_rx_desc_extended *rx_desc;
	struct my_u1 {
		__le64 a;
		__le64 b;
		__le64 c;
		__le64 d;
	} *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name, netdev->state, netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}

	/* Print Tx Ring Summary */
	if (!netdev || !netif_running(netdev))
		return;

	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
		0, tx_ring->next_to_use, tx_ring->next_to_clean,
		(unsigned long long)buffer_info->dma,
		buffer_info->length,
		buffer_info->next_to_watch,
		(unsigned long long)buffer_info->time_stamp);

	/* Print Tx Ring */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |      Buffer Address [63:0] (Reserved on Write Back)          |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |   CSS    | Status |  CMD   |  CSO  |   Length     |
	 *   +--------------------------------------------------------------+
	 *   63       48 47      36 35    32 31    24 23   16 15            0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63    48 47   40 39      32 31          16 15     8 7         0
	 *   +----------------------------------------------------------------+
	 * 0 | TUCSE  | TUCS0 |  TUCSS  |    IPCSE      | IPCS0  |   IPCSS    |
	 *   +----------------------------------------------------------------+
	 * 8 |  MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |     PAYLEN       |
	 *   +----------------------------------------------------------------+
	 *   63    48 47    40 39 36 35 32 31   24 23  20 19                 0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                    Buffer Address [63:0]                       |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP |  DTALEN    |
	 *   +----------------------------------------------------------------+
	 *   63       48 47   40 39  36 35    32 31     24 23  20 19          0
	 */
	pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n");
	pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n");
	pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		const char *next_desc;
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			next_desc = " NTC/U";
		else if (i == tx_ring->next_to_use)
			next_desc = " NTU";
		else if (i == tx_ring->next_to_clean)
			next_desc = " NTC";
		else
			next_desc = "";
		pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
			(!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
			 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
			i,
			(unsigned long long)le64_to_cpu(u0->a),
			(unsigned long long)le64_to_cpu(u0->b),
			(unsigned long long)buffer_info->dma,
			buffer_info->length, buffer_info->next_to_watch,
			(unsigned long long)buffer_info->time_stamp,
			buffer_info->skb, next_desc);

		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, phys_to_virt(buffer_info->dma),
				       buffer_info->length, true);
	}

	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	pr_info(" %5d %5X %5X\n",
		0, rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print Rx Ring */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |               Buffer Address 0 [63:0]               |
		 *    +-----------------------------------------------------+
		 *  8 |               Buffer Address 1 [63:0]               |
		 *    +-----------------------------------------------------+
		 * 16 |               Buffer Address 2 [63:0]               |
		 *    +-----------------------------------------------------+
		 * 24 |               Buffer Address 3 [63:0]               |
		 *    +-----------------------------------------------------+
		 */
		pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47   32 31     13 12   8 7    4 3       0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP    |  Rsvd   | MRQ  | Rsvd | MRQ RSS  |
		 *   | Checksum | Ident |         | Queue|      |  Type    |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
					"R ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16,
						       1,
						       phys_to_virt(buffer_info->dma),
						       adapter->rx_ps_bsize0,
						       true);
			}
		}
		break;
	default:
	case 0:
		/* Extended Receive Descriptor (Read) Format
		 *
		 *   +-----------------------------------------------------+
		 * 0 |                Buffer Address [63:0]                |
		 *   +-----------------------------------------------------+
		 * 8 |                      Reserved                       |
		 *   +-----------------------------------------------------+
		 */
		pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n");
		/* Extended Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31    24 23            4 3        0
		 *   +------------------------------------------------------+
		 *   |     RSS Hash      |        |               |         |
		 * 0 +-------------------+  Rsvd  |   Reserved    | MRQ RSS |
		 *   | Packet   | IP     |        |               |  Type   |
		 *   | Checksum | Ident  |        |               |         |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;

			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
					"R ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter))
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16,
						       1,
						       phys_to_virt(buffer_info->dma),
						       adapter->rx_buffer_len,
						       true);
			}
		}
	}
}
/**
 * e1000_desc_unused - calculate the number of unused descriptors in a ring
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
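
/*
 * Worked example (illustrative only): with ring->count = 256,
 * next_to_clean = 10 and next_to_use = 250 the indices have wrapped, so
 * the unused count is 256 + 10 - 250 - 1 = 15.  The "- 1" keeps one slot
 * permanently empty so that next_to_use == next_to_clean unambiguously
 * means "empty" rather than "full".
 */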

/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the net device structure
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);

	skb->protocol = eth_type_trans(skb, netdev);

	if (status & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Rx checksum disabled */
	if (!(adapter->netdev->features & NETIF_F_RXCSUM))
		return;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit or IP checksum error bit is set */
	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_good++;
}
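
/*
 * Layout of the status_err word consumed above, as the extended Rx
 * write-back descriptor packs it: bits [15:0] carry the status flags
 * (DD, EOP, IXSM, TCPCS, UDPCS, ...) and bits [31:24] carry the error
 * flags, which is why the code extracts "status" with a u16 cast and
 * "errors" with a shift by 24.  Illustrative example, assuming the
 * classic e1000 bit values (DD=0x01, EOP=0x02, TCPCS=0x20): a
 * status_err of 0x00000023 has DD | EOP | TCPCS set and no error bits,
 * so the frame ends up marked CHECKSUM_UNNECESSARY.
 */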

/**
 * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
 * @hw: pointer to the HW structure
 * @tail: address of tail descriptor register
 * @i: value to write to tail descriptor register
 *
 * When updating the tail register, the ME could be accessing Host CSR
 * registers at the same time.  Normally, this is handled in h/w by an
 * arbiter but on some parts there is a bug that acknowledges Host accesses
 * later than it should which could result in the descriptor register
 * having an incorrect value.  Work around this by checking the FWSM
 * register, which has bit 24 set while ME is accessing Host CSR registers;
 * wait if it is set and try again a number of times.
 **/
static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, void __iomem *tail,
					unsigned int i)
{
	unsigned int j = 0;

	while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
	       (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
		udelay(50);

	writel(i, tail);

	if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
		return E1000_ERR_SWFW_SYNC;

	return 0;
}
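
/*
 * Note on the failure path above: the intent is to report
 * E1000_ERR_SWFW_SYNC only when the FWSM poll loop used up its retry
 * budget *and* the tail value read back does not match what was just
 * written.  If the write took effect anyway, or ME released the bus
 * within the retry window, the update is treated as successful and the
 * callers below take no recovery action.
 */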

static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (e1000e_update_tail_wa(hw, rx_ring->tail, i)) {
		u32 rctl = er32(RCTL);
		ew32(RCTL, rctl & ~E1000_RCTL_EN);
		e_err("ME firmware caused invalid RDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if (e1000e_update_tail_wa(hw, tx_ring->tail, i)) {
		u32 tctl = er32(TCTL);
		ew32(TCTL, tctl & ~E1000_TCTL_EN);
		e_err("ME firmware caused invalid TDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
				   int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i);
			else
				writel(i, rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}
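
/*
 * Tail-update batching note (illustrative): the tail register is only
 * written when the ring index crosses an E1000_RX_BUFFER_WRITE boundary,
 * so with its customary value of 16 the hardware tail pointer advances
 * once per 16 replenished descriptors rather than once per descriptor,
 * amortizing both the MMIO write and the wmb() barrier that precedes it.
 */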

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
				      int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(gfp);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = __netdev_alloc_skb_ip_align(netdev,
						  adapter->rx_ps_bsize0,
						  gfp);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/*
			 * Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i << 1);
			else
				writel(i << 1, rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}
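
/*
 * Why "i << 1" when writing the tail above: a packet-split descriptor is
 * twice the size of a legacy descriptor, while the RDT register counts in
 * legacy-descriptor units, so the ring index is doubled before being
 * handed to the tail register.
 */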

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
					 int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(gfp);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
			e1000e_update_rdt_wa(rx_ring, i);
		else
			writel(i, rx_ring->tail);
	}
}

static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
				 struct sk_buff *skb)
{
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rss);
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
			       int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/*
		 * !EOP means multiple descriptors were used to store a single
		 * packet; if that's the case, we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			/* If configured to store CRC, don't subtract FCS,
			 * but keep the FCS bytes out of the total_rx_bytes
			 * counter
			 */
			if (netdev->features & NETIF_F_RXFCS)
				total_rx_bytes -= 4;
			else
				length -= 4;
		}

		total_rx_bytes += length;
		total_rx_packets++;

		/*
		 * code added for copybreak; this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb_ip_align(netdev, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
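
		/*
		 * Illustrative example of the copybreak path above, assuming
		 * the driver's default copybreak of 256 bytes (a module
		 * parameter): a 64-byte TCP ACK received into a large
		 * rx_buffer_len-sized skb is memcpy'd into a fresh
		 * right-sized skb, while the original large skb is kept in
		 * buffer_info for the next allocation pass to re-map, so
		 * the stack gets a small skb and the driver avoids
		 * reallocating the big receive buffer.
		 */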
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

static void e1000_put_txbuf(struct e1000_ring *tx_ring,
			    struct e1000_buffer *buffer_info)
{
	struct e1000_adapter *adapter = tx_ring->adapter;

	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	if (!adapter->tx_hang_recheck &&
	    (adapter->flags2 & FLAG2_DMA_BURST)) {
		/*
		 * The hang may only be a stall on descriptor write-back:
		 * flush pending descriptor writebacks to memory and detect
		 * again on the next pass
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		/*
		 * Due to rare timing issues, write to TIDV again to ensure
		 * the write is successful
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		adapter->tx_hang_recheck = true;
		return;
	}
	/* Real hang detected */
	adapter->tx_hang_recheck = false;
	netif_stop_queue(netdev);

	e1e_rphy(hw, PHY_STATUS, &phy_status);
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(tx_ring->head),
	      readl(tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status,
	      er32(STATUS),
	      phy_status,
	      phy_1000t_status,
	      phy_ext_status,
	      pci_status);
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb();	/* read buffer_info after eop_desc */
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
				if (buffer_info->skb) {
					bytes_compl += buffer_info->skb->len;
					pkts_compl++;
				}
			}

			e1000_put_txbuf(tx_ring, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
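	/*
	 * Illustrative note on the threshold below: a queue stopped for lack
	 * of descriptors is only restarted once at least TX_WAKE_THRESHOLD
	 * (32) descriptors are free again, which keeps the queue from
	 * bouncing between stopped and woken on every single reclaimed
	 * descriptor.
	 */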
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/*
		 * Detect a transmit hang in hardware; this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF))
			schedule_work(&adapter->print_hang_task);
		else
			adapter->tx_hang_recheck = false;
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return count < tx_ring->count;
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
				  int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other Rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/*
			 * this looks ugly, but it seems compiler issues make
			 * it more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/*
			 * page alloc/put takes too long and affects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put; only valid in softirq (napi)
			 * context to call kmap_*
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/*
				 * there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				dma_sync_single_for_cpu(&pdev->dev,
							ps_page->dma,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
				vaddr = kmap_atomic(ps_page->page);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr);
				dma_sync_single_for_device(&pdev->dev,
							   ps_page->dma,
							   PAGE_SIZE,
							   DMA_FROM_DEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
					if (!(netdev->features & NETIF_F_RXFCS))
						l1 -= 4;
				}

				skb_put(skb, l1);
				goto copydone;
			} /* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE;
		}

		/* strip the ethernet crc; problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			if (!(netdev->features & NETIF_F_RXFCS))
				pskb_trim(skb, skb->len - 4);
		}

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
				     int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* the error bits are only valid for DD + EOP descriptors */
		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
			     ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			      !(netdev->features & NETIF_F_RXALL)))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb_irq(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop (rx_ring->rx_skb_top)
		if (!(staterr & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page
				 */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet;
				 * copybreak to save the put_page/alloc_page
				 */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page
					 */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	writel(0, rx_ring->head);
	writel(0, rx_ring->tail);
}

static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     downshift_task);

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/*
	 * reading ICR disables interrupts using IAM
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;	/* Not our interrupt */

	/*
	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/*
	 * Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/*
		 * ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/*
		 * 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = true;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}
1800
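/**
 * e1000_intr_msix_tx - MSI-X Tx queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/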
1801 static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
1802 {
1803 struct net_device *netdev = data;
1804 struct e1000_adapter *adapter = netdev_priv(netdev);
1805 struct e1000_hw *hw = &adapter->hw;
1806 struct e1000_ring *tx_ring = adapter->tx_ring;
1807
1809 adapter->total_tx_bytes = 0;
1810 adapter->total_tx_packets = 0;
1811
1812 if (!e1000_clean_tx_irq(tx_ring))
1813 /* Ring was not completely cleaned, so fire another interrupt */
1814 ew32(ICS, tx_ring->ims_val);
1815
1816 return IRQ_HANDLED;
1817 }
1818
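/**
 * e1000_intr_msix_rx - MSI-X Rx queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/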
1819 static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1820 {
1821 struct net_device *netdev = data;
1822 struct e1000_adapter *adapter = netdev_priv(netdev);
1823 struct e1000_ring *rx_ring = adapter->rx_ring;
1824
1825 /* Write the ITR value calculated at the end of the
1826 * previous interrupt.
1827 */
1828 if (rx_ring->set_itr) {
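/*
 * The ITR/EITR registers count the interrupt interval in 256ns
 * units, so an interrupts/sec rate converts to a register value
 * of 10^9 / (rate * 256).
 */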
1829 writel(1000000000 / (rx_ring->itr_val * 256),
1830 rx_ring->itr_register);
1831 rx_ring->set_itr = 0;
1832 }
1833
1834 if (napi_schedule_prep(&adapter->napi)) {
1835 adapter->total_rx_bytes = 0;
1836 adapter->total_rx_packets = 0;
1837 __napi_schedule(&adapter->napi);
1838 }
1839 return IRQ_HANDLED;
1840 }
1841
1842 /**
1843 * e1000_configure_msix - Configure MSI-X hardware
1844 *
1845 * e1000_configure_msix sets up the hardware to properly
1846 * generate MSI-X interrupts.
1847 **/
1848 static void e1000_configure_msix(struct e1000_adapter *adapter)
1849 {
1850 struct e1000_hw *hw = &adapter->hw;
1851 struct e1000_ring *rx_ring = adapter->rx_ring;
1852 struct e1000_ring *tx_ring = adapter->tx_ring;
1853 int vector = 0;
1854 u32 ctrl_ext, ivar = 0;
1855
1856 adapter->eiac_mask = 0;
1857
1858 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1859 if (hw->mac.type == e1000_82574) {
1860 u32 rfctl = er32(RFCTL);
1861 rfctl |= E1000_RFCTL_ACK_DIS;
1862 ew32(RFCTL, rfctl);
1863 }
1864
1865 #define E1000_IVAR_INT_ALLOC_VALID 0x8
1866 /* Configure Rx vector */
1867 rx_ring->ims_val = E1000_IMS_RXQ0;
1868 adapter->eiac_mask |= rx_ring->ims_val;
1869 if (rx_ring->itr_val)
1870 writel(1000000000 / (rx_ring->itr_val * 256),
1871 rx_ring->itr_register);
1872 else
1873 writel(1, rx_ring->itr_register);
1874 ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
1875
1876 /* Configure Tx vector */
1877 tx_ring->ims_val = E1000_IMS_TXQ0;
1878 vector++;
1879 if (tx_ring->itr_val)
1880 writel(1000000000 / (tx_ring->itr_val * 256),
1881 tx_ring->itr_register);
1882 else
1883 writel(1, tx_ring->itr_register);
1884 adapter->eiac_mask |= tx_ring->ims_val;
1885 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
1886
1887 /* set vector for Other Causes, e.g. link changes */
1888 vector++;
1889 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
1890 if (rx_ring->itr_val)
1891 writel(1000000000 / (rx_ring->itr_val * 256),
1892 hw->hw_addr + E1000_EITR_82574(vector));
1893 else
1894 writel(1, hw->hw_addr + E1000_EITR_82574(vector));
1895
1896 /* Cause Tx interrupts on every write back */
1897 ivar |= (1 << 31);
1898
1899 ew32(IVAR, ivar);
1900
1901 /* enable MSI-X PBA support */
1902 ctrl_ext = er32(CTRL_EXT);
1903 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
1904
1905 /* Auto-Mask Other interrupts upon ICR read */
1906 #define E1000_EIAC_MASK_82574 0x01F00000
1907 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
1908 ctrl_ext |= E1000_CTRL_EXT_EIAME;
1909 ew32(CTRL_EXT, ctrl_ext);
1910 e1e_flush();
1911 }
1912
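/**
 * e1000e_reset_interrupt_capability - disable MSI-X/MSI interrupt schemes
 * @adapter: board private structure
 *
 * Releases whichever of MSI-X or MSI was enabled and frees the associated
 * vector table.
 **/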
1913 void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
1914 {
1915 if (adapter->msix_entries) {
1916 pci_disable_msix(adapter->pdev);
1917 kfree(adapter->msix_entries);
1918 adapter->msix_entries = NULL;
1919 } else if (adapter->flags & FLAG_MSI_ENABLED) {
1920 pci_disable_msi(adapter->pdev);
1921 adapter->flags &= ~FLAG_MSI_ENABLED;
1922 }
1923 }
1924
1925 /**
1926 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
1927 *
1928 * Attempt to configure interrupts using the best available
1929 * capabilities of the hardware and kernel.
1930 **/
1931 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
1932 {
1933 int err;
1934 int i;
1935
1936 switch (adapter->int_mode) {
1937 case E1000E_INT_MODE_MSIX:
1938 if (adapter->flags & FLAG_HAS_MSIX) {
1939 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
1940 adapter->msix_entries = kcalloc(adapter->num_vectors,
1941 sizeof(struct msix_entry),
1942 GFP_KERNEL);
1943 if (adapter->msix_entries) {
1944 for (i = 0; i < adapter->num_vectors; i++)
1945 adapter->msix_entries[i].entry = i;
1946
1947 err = pci_enable_msix(adapter->pdev,
1948 adapter->msix_entries,
1949 adapter->num_vectors);
1950 if (err == 0)
1951 return;
1952 }
1953 /* MSI-X failed, so fall through and try MSI */
1954 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
1955 e1000e_reset_interrupt_capability(adapter);
1956 }
1957 adapter->int_mode = E1000E_INT_MODE_MSI;
1958 /* Fall through */
1959 case E1000E_INT_MODE_MSI:
1960 if (!pci_enable_msi(adapter->pdev)) {
1961 adapter->flags |= FLAG_MSI_ENABLED;
1962 } else {
1963 adapter->int_mode = E1000E_INT_MODE_LEGACY;
1964 e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
1965 }
1966 /* Fall through */
1967 case E1000E_INT_MODE_LEGACY:
1968 /* Don't do anything; this is the system default */
1969 break;
1970 }
1971
1972 /* store the number of vectors being used */
1973 adapter->num_vectors = 1;
1974 }
1975
1976 /**
1977 * e1000_request_msix - Initialize MSI-X interrupts
1978 *
1979 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
1980 * kernel.
1981 **/
1982 static int e1000_request_msix(struct e1000_adapter *adapter)
1983 {
1984 struct net_device *netdev = adapter->netdev;
1985 int err = 0, vector = 0;
1986
1987 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1988 snprintf(adapter->rx_ring->name,
1989 sizeof(adapter->rx_ring->name) - 1,
1990 "%s-rx-0", netdev->name);
1991 else
1992 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1993 err = request_irq(adapter->msix_entries[vector].vector,
1994 e1000_intr_msix_rx, 0, adapter->rx_ring->name,
1995 netdev);
1996 if (err)
1997 return err;
1998 adapter->rx_ring->itr_register = adapter->hw.hw_addr +
1999 E1000_EITR_82574(vector);
2000 adapter->rx_ring->itr_val = adapter->itr;
2001 vector++;
2002
2003 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2004 snprintf(adapter->tx_ring->name,
2005 sizeof(adapter->tx_ring->name) - 1,
2006 "%s-tx-0", netdev->name);
2007 else
2008 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
2009 err = request_irq(adapter->msix_entries[vector].vector,
2010 e1000_intr_msix_tx, 0, adapter->tx_ring->name,
2011 netdev);
2012 if (err)
2013 return err;
2014 adapter->tx_ring->itr_register = adapter->hw.hw_addr +
2015 E1000_EITR_82574(vector);
2016 adapter->tx_ring->itr_val = adapter->itr;
2017 vector++;
2018
2019 err = request_irq(adapter->msix_entries[vector].vector,
2020 e1000_msix_other, 0, netdev->name, netdev);
2021 if (err)
2022 return err;
2023
2024 e1000_configure_msix(adapter);
2025
2026 return 0;
2027 }
2028
2029 /**
2030 * e1000_request_irq - initialize interrupts
2031 *
2032 * Attempts to configure interrupts using the best available
2033 * capabilities of the hardware and kernel.
2034 **/
2035 static int e1000_request_irq(struct e1000_adapter *adapter)
2036 {
2037 struct net_device *netdev = adapter->netdev;
2038 int err;
2039
2040 if (adapter->msix_entries) {
2041 err = e1000_request_msix(adapter);
2042 if (!err)
2043 return err;
2044 /* fall back to MSI */
2045 e1000e_reset_interrupt_capability(adapter);
2046 adapter->int_mode = E1000E_INT_MODE_MSI;
2047 e1000e_set_interrupt_capability(adapter);
2048 }
2049 if (adapter->flags & FLAG_MSI_ENABLED) {
2050 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
2051 netdev->name, netdev);
2052 if (!err)
2053 return err;
2054
2055 /* fall back to legacy interrupt */
2056 e1000e_reset_interrupt_capability(adapter);
2057 adapter->int_mode = E1000E_INT_MODE_LEGACY;
2058 }
2059
2060 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
2061 netdev->name, netdev);
2062 if (err)
2063 e_err("Unable to allocate interrupt, Error: %d\n", err);
2064
2065 return err;
2066 }
2067
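/**
 * e1000_free_irq - free all registered interrupt handlers
 * @adapter: board private structure
 **/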
2068 static void e1000_free_irq(struct e1000_adapter *adapter)
2069 {
2070 struct net_device *netdev = adapter->netdev;
2071
2072 if (adapter->msix_entries) {
2073 int vector = 0;
2074
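/* Rx ring interrupt vector */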
2075 free_irq(adapter->msix_entries[vector].vector, netdev);
2076 vector++;
2077
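/* Tx ring interrupt vector */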
2078 free_irq(adapter->msix_entries[vector].vector, netdev);
2079 vector++;
2080
2081 /* Other Causes interrupt vector */
2082 free_irq(adapter->msix_entries[vector].vector, netdev);
2083 return;
2084 }
2085
2086 free_irq(adapter->pdev->irq, netdev);
2087 }
2088
2089 /**
2090 * e1000_irq_disable - Mask off interrupt generation on the NIC
2091 **/
2092 static void e1000_irq_disable(struct e1000_adapter *adapter)
2093 {
2094 struct e1000_hw *hw = &adapter->hw;
2095
2096 ew32(IMC, ~0);
2097 if (adapter->msix_entries)
2098 ew32(EIAC_82574, 0);
2099 e1e_flush();
2100
2101 if (adapter->msix_entries) {
2102 int i;
2103 for (i = 0; i < adapter->num_vectors; i++)
2104 synchronize_irq(adapter->msix_entries[i].vector);
2105 } else {
2106 synchronize_irq(adapter->pdev->irq);
2107 }
2108 }
2109
2110 /**
2111 * e1000_irq_enable - Enable default interrupt generation settings
2112 **/
2113 static void e1000_irq_enable(struct e1000_adapter *adapter)
2114 {
2115 struct e1000_hw *hw = &adapter->hw;
2116
2117 if (adapter->msix_entries) {
2118 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2119 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
2120 } else {
2121 ew32(IMS, IMS_ENABLE_MASK);
2122 }
2123 e1e_flush();
2124 }
2125
2126 /**
2127 * e1000e_get_hw_control - get control of the h/w from f/w
2128 * @adapter: address of board private structure
2129 *
2130 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2131 * For ASF and Pass Through versions of f/w this means that
2132 * the driver is loaded. For AMT version (only with 82573)
2133 * of the f/w this means that the network i/f is open.
2134 **/
2135 void e1000e_get_hw_control(struct e1000_adapter *adapter)
2136 {
2137 struct e1000_hw *hw = &adapter->hw;
2138 u32 ctrl_ext;
2139 u32 swsm;
2140
2141 /* Let firmware know the driver has taken over */
2142 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2143 swsm = er32(SWSM);
2144 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
2145 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2146 ctrl_ext = er32(CTRL_EXT);
2147 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2148 }
2149 }
2150
2151 /**
2152 * e1000e_release_hw_control - release control of the h/w to f/w
2153 * @adapter: address of board private structure
2154 *
2155 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2156 * For ASF and Pass Through versions of f/w this means that the
2157  * driver is no longer loaded. For AMT version (only with 82573)
2158 * of the f/w this means that the network i/f is closed.
2159 *
2160 **/
2161 void e1000e_release_hw_control(struct e1000_adapter *adapter)
2162 {
2163 struct e1000_hw *hw = &adapter->hw;
2164 u32 ctrl_ext;
2165 u32 swsm;
2166
2167 	/* Let firmware take over control of h/w */
2168 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2169 swsm = er32(SWSM);
2170 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2171 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2172 ctrl_ext = er32(CTRL_EXT);
2173 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2174 }
2175 }
2176
2177 /**
2178  * e1000_alloc_ring_dma - allocate DMA memory for a ring structure
2179 **/
2180 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2181 struct e1000_ring *ring)
2182 {
2183 struct pci_dev *pdev = adapter->pdev;
2184
2185 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2186 GFP_KERNEL);
2187 if (!ring->desc)
2188 return -ENOMEM;
2189
2190 return 0;
2191 }
2192
2193 /**
2194 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2195 * @tx_ring: Tx descriptor ring
2196 *
2197 * Return 0 on success, negative on failure
2198 **/
2199 int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
2200 {
2201 struct e1000_adapter *adapter = tx_ring->adapter;
2202 int err = -ENOMEM, size;
2203
2204 size = sizeof(struct e1000_buffer) * tx_ring->count;
2205 tx_ring->buffer_info = vzalloc(size);
2206 if (!tx_ring->buffer_info)
2207 goto err;
2208
2209 /* round up to nearest 4K */
2210 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2211 tx_ring->size = ALIGN(tx_ring->size, 4096);
2212
2213 err = e1000_alloc_ring_dma(adapter, tx_ring);
2214 if (err)
2215 goto err;
2216
2217 tx_ring->next_to_use = 0;
2218 tx_ring->next_to_clean = 0;
2219
2220 return 0;
2221 err:
2222 vfree(tx_ring->buffer_info);
2223 e_err("Unable to allocate memory for the transmit descriptor ring\n");
2224 return err;
2225 }
2226
2227 /**
2228 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2229 * @rx_ring: Rx descriptor ring
2230 *
2231 * Returns 0 on success, negative on failure
2232 **/
2233 int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
2234 {
2235 struct e1000_adapter *adapter = rx_ring->adapter;
2236 struct e1000_buffer *buffer_info;
2237 int i, size, desc_len, err = -ENOMEM;
2238
2239 size = sizeof(struct e1000_buffer) * rx_ring->count;
2240 rx_ring->buffer_info = vzalloc(size);
2241 if (!rx_ring->buffer_info)
2242 goto err;
2243
2244 for (i = 0; i < rx_ring->count; i++) {
2245 buffer_info = &rx_ring->buffer_info[i];
2246 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2247 sizeof(struct e1000_ps_page),
2248 GFP_KERNEL);
2249 if (!buffer_info->ps_pages)
2250 goto err_pages;
2251 }
2252
2253 desc_len = sizeof(union e1000_rx_desc_packet_split);
2254
2255 /* Round up to nearest 4K */
2256 rx_ring->size = rx_ring->count * desc_len;
2257 rx_ring->size = ALIGN(rx_ring->size, 4096);
2258
2259 err = e1000_alloc_ring_dma(adapter, rx_ring);
2260 if (err)
2261 goto err_pages;
2262
2263 rx_ring->next_to_clean = 0;
2264 rx_ring->next_to_use = 0;
2265 rx_ring->rx_skb_top = NULL;
2266
2267 return 0;
2268
2269 err_pages:
2270 for (i = 0; i < rx_ring->count; i++) {
2271 buffer_info = &rx_ring->buffer_info[i];
2272 kfree(buffer_info->ps_pages);
2273 }
2274 err:
2275 vfree(rx_ring->buffer_info);
2276 e_err("Unable to allocate memory for the receive descriptor ring\n");
2277 return err;
2278 }
2279
2280 /**
2281 * e1000_clean_tx_ring - Free Tx Buffers
2282 * @tx_ring: Tx descriptor ring
2283 **/
2284 static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
2285 {
2286 struct e1000_adapter *adapter = tx_ring->adapter;
2287 struct e1000_buffer *buffer_info;
2288 unsigned long size;
2289 unsigned int i;
2290
2291 for (i = 0; i < tx_ring->count; i++) {
2292 buffer_info = &tx_ring->buffer_info[i];
2293 e1000_put_txbuf(tx_ring, buffer_info);
2294 }
2295
2296 netdev_reset_queue(adapter->netdev);
2297 size = sizeof(struct e1000_buffer) * tx_ring->count;
2298 memset(tx_ring->buffer_info, 0, size);
2299
2300 memset(tx_ring->desc, 0, tx_ring->size);
2301
2302 tx_ring->next_to_use = 0;
2303 tx_ring->next_to_clean = 0;
2304
2305 writel(0, tx_ring->head);
2306 writel(0, tx_ring->tail);
2307 }
2308
2309 /**
2310 * e1000e_free_tx_resources - Free Tx Resources per Queue
2311 * @tx_ring: Tx descriptor ring
2312 *
2313 * Free all transmit software resources
2314 **/
2315 void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
2316 {
2317 struct e1000_adapter *adapter = tx_ring->adapter;
2318 struct pci_dev *pdev = adapter->pdev;
2319
2320 e1000_clean_tx_ring(tx_ring);
2321
2322 vfree(tx_ring->buffer_info);
2323 tx_ring->buffer_info = NULL;
2324
2325 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2326 tx_ring->dma);
2327 tx_ring->desc = NULL;
2328 }
2329
2330 /**
2331 * e1000e_free_rx_resources - Free Rx Resources
2332 * @rx_ring: Rx descriptor ring
2333 *
2334 * Free all receive software resources
2335 **/
2336 void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
2337 {
2338 struct e1000_adapter *adapter = rx_ring->adapter;
2339 struct pci_dev *pdev = adapter->pdev;
2340 int i;
2341
2342 e1000_clean_rx_ring(rx_ring);
2343
2344 for (i = 0; i < rx_ring->count; i++)
2345 kfree(rx_ring->buffer_info[i].ps_pages);
2346
2347 vfree(rx_ring->buffer_info);
2348 rx_ring->buffer_info = NULL;
2349
2350 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2351 rx_ring->dma);
2352 rx_ring->desc = NULL;
2353 }
2354
2355 /**
2356 * e1000_update_itr - update the dynamic ITR value based on statistics
2357 * @adapter: pointer to adapter
2358 * @itr_setting: current adapter->itr
2359 * @packets: the number of packets during this measurement interval
2360 * @bytes: the number of bytes during this measurement interval
2361 *
2362 * Stores a new ITR value based on packets and byte
2363 * counts during the last interrupt. The advantage of per interrupt
2364 * computation is faster updates and more accurate ITR for the current
2365 * traffic pattern. Constants in this function were computed
2366 * based on theoretical maximum wire speed and thresholds were set based
2367 * on testing data as well as attempting to minimize response time
2368 * while increasing bulk throughput. This functionality is controlled
2369 * by the InterruptThrottleRate module parameter.
2370 **/
2371 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2372 u16 itr_setting, int packets,
2373 int bytes)
2374 {
2375 unsigned int retval = itr_setting;
2376
2377 if (packets == 0)
2378 return itr_setting;
2379
2380 switch (itr_setting) {
2381 case lowest_latency:
2382 /* handle TSO and jumbo frames */
2383 if (bytes/packets > 8000)
2384 retval = bulk_latency;
2385 else if ((packets < 5) && (bytes > 512))
2386 retval = low_latency;
2387 break;
2388 case low_latency: /* 50 usec aka 20000 ints/s */
2389 if (bytes > 10000) {
2390 /* this if handles the TSO accounting */
2391 if (bytes/packets > 8000)
2392 retval = bulk_latency;
2393 else if ((packets < 10) || ((bytes/packets) > 1200))
2394 retval = bulk_latency;
2395 else if ((packets > 35))
2396 retval = lowest_latency;
2397 } else if (bytes/packets > 2000) {
2398 retval = bulk_latency;
2399 } else if (packets <= 2 && bytes < 512) {
2400 retval = lowest_latency;
2401 }
2402 break;
2403 case bulk_latency: /* 250 usec aka 4000 ints/s */
2404 if (bytes > 25000) {
2405 if (packets > 35)
2406 retval = low_latency;
2407 } else if (bytes < 6000) {
2408 retval = low_latency;
2409 }
2410 break;
2411 }
2412
2413 return retval;
2414 }
2415
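/**
 * e1000_set_itr - program a new interrupt throttle rate
 * @adapter: board private structure
 *
 * Combines the Tx and Rx estimates from e1000_update_itr() and writes the
 * result to ITR (or, for MSI-X, defers it to the Rx vector's EITR).
 **/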
2416 static void e1000_set_itr(struct e1000_adapter *adapter)
2417 {
2418 struct e1000_hw *hw = &adapter->hw;
2419 u16 current_itr;
2420 u32 new_itr = adapter->itr;
2421
2422 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2423 if (adapter->link_speed != SPEED_1000) {
2424 current_itr = 0;
2425 new_itr = 4000;
2426 goto set_itr_now;
2427 }
2428
2429 if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2430 new_itr = 0;
2431 goto set_itr_now;
2432 }
2433
2434 adapter->tx_itr = e1000_update_itr(adapter,
2435 adapter->tx_itr,
2436 adapter->total_tx_packets,
2437 adapter->total_tx_bytes);
2438 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2439 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2440 adapter->tx_itr = low_latency;
2441
2442 adapter->rx_itr = e1000_update_itr(adapter,
2443 adapter->rx_itr,
2444 adapter->total_rx_packets,
2445 adapter->total_rx_bytes);
2446 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2447 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2448 adapter->rx_itr = low_latency;
2449
2450 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2451
2452 switch (current_itr) {
2453 	/* the byte/packet count thresholds in e1000_update_itr() depend on these values */
2454 case lowest_latency:
2455 new_itr = 70000;
2456 break;
2457 case low_latency:
2458 new_itr = 20000; /* aka hwitr = ~200 */
2459 break;
2460 case bulk_latency:
2461 new_itr = 4000;
2462 break;
2463 default:
2464 break;
2465 }
2466
2467 set_itr_now:
2468 if (new_itr != adapter->itr) {
2469 /*
2470 * this attempts to bias the interrupt rate towards Bulk
2471 * by adding intermediate steps when interrupt rate is
2472 * increasing
2473 */
2474 new_itr = new_itr > adapter->itr ?
2475 min(adapter->itr + (new_itr >> 2), new_itr) :
2476 new_itr;
2477 adapter->itr = new_itr;
2478 adapter->rx_ring->itr_val = new_itr;
2479 if (adapter->msix_entries)
2480 adapter->rx_ring->set_itr = 1;
2481 else
2482 if (new_itr)
2483 ew32(ITR, 1000000000 / (new_itr * 256));
2484 else
2485 ew32(ITR, 0);
2486 }
2487 }
2488
2489 /**
2490 * e1000_alloc_queues - Allocate memory for all rings
2491 * @adapter: board private structure to initialize
2492 **/
2493 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
2494 {
2495 int size = sizeof(struct e1000_ring);
2496
2497 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
2498 if (!adapter->tx_ring)
2499 goto err;
2500 adapter->tx_ring->count = adapter->tx_ring_count;
2501 adapter->tx_ring->adapter = adapter;
2502
2503 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
2504 if (!adapter->rx_ring)
2505 goto err;
2506 adapter->rx_ring->count = adapter->rx_ring_count;
2507 adapter->rx_ring->adapter = adapter;
2508
2509 return 0;
2510 err:
2511 e_err("Unable to allocate memory for queues\n");
2512 kfree(adapter->rx_ring);
2513 kfree(adapter->tx_ring);
2514 return -ENOMEM;
2515 }
2516
2517 /**
2518 * e1000_clean - NAPI Rx polling callback
2519 * @napi: struct associated with this polling callback
2520  * @budget: number of packets the driver may process this poll
2521 **/
2522 static int e1000_clean(struct napi_struct *napi, int budget)
2523 {
2524 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
2525 struct e1000_hw *hw = &adapter->hw;
2526 struct net_device *poll_dev = adapter->netdev;
2527 int tx_cleaned = 1, work_done = 0;
2528
2529 adapter = netdev_priv(poll_dev);
2530
2531 if (adapter->msix_entries &&
2532 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2533 goto clean_rx;
2534
2535 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2536
2537 clean_rx:
2538 adapter->clean_rx(adapter->rx_ring, &work_done, budget);
2539
2540 if (!tx_cleaned)
2541 work_done = budget;
2542
2543 /* If budget not fully consumed, exit the polling mode */
2544 if (work_done < budget) {
2545 if (adapter->itr_setting & 3)
2546 e1000_set_itr(adapter);
2547 napi_complete(napi);
2548 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2549 if (adapter->msix_entries)
2550 ew32(IMS, adapter->rx_ring->ims_val);
2551 else
2552 e1000_irq_enable(adapter);
2553 }
2554 }
2555
2556 return work_done;
2557 }
2558
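/**
 * e1000_vlan_rx_add_vid - add a VLAN ID to the hardware filter table
 * @netdev: network interface device structure
 * @vid: VLAN ID to be added
 **/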
2559 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2560 {
2561 struct e1000_adapter *adapter = netdev_priv(netdev);
2562 struct e1000_hw *hw = &adapter->hw;
2563 u32 vfta, index;
2564
2565 /* don't update vlan cookie if already programmed */
2566 if ((adapter->hw.mng_cookie.status &
2567 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2568 (vid == adapter->mng_vlan_id))
2569 return 0;
2570
2571 /* add VID to filter table */
2572 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2573 index = (vid >> 5) & 0x7F;
2574 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2575 vfta |= (1 << (vid & 0x1F));
2576 hw->mac.ops.write_vfta(hw, index, vfta);
2577 }
2578
2579 set_bit(vid, adapter->active_vlans);
2580
2581 return 0;
2582 }
2583
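/**
 * e1000_vlan_rx_kill_vid - remove a VLAN ID from the hardware filter table
 * @netdev: network interface device structure
 * @vid: VLAN ID to be removed
 **/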
2584 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2585 {
2586 struct e1000_adapter *adapter = netdev_priv(netdev);
2587 struct e1000_hw *hw = &adapter->hw;
2588 u32 vfta, index;
2589
2590 if ((adapter->hw.mng_cookie.status &
2591 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2592 (vid == adapter->mng_vlan_id)) {
2593 /* release control to f/w */
2594 e1000e_release_hw_control(adapter);
2595 return 0;
2596 }
2597
2598 /* remove VID from filter table */
2599 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2600 index = (vid >> 5) & 0x7F;
2601 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2602 vfta &= ~(1 << (vid & 0x1F));
2603 hw->mac.ops.write_vfta(hw, index, vfta);
2604 }
2605
2606 clear_bit(vid, adapter->active_vlans);
2607
2608 return 0;
2609 }
2610
2611 /**
2612 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2613 * @adapter: board private structure to initialize
2614 **/
2615 static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
2616 {
2617 struct net_device *netdev = adapter->netdev;
2618 struct e1000_hw *hw = &adapter->hw;
2619 u32 rctl;
2620
2621 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2622 /* disable VLAN receive filtering */
2623 rctl = er32(RCTL);
2624 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2625 ew32(RCTL, rctl);
2626
2627 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2628 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2629 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2630 }
2631 }
2632 }
2633
2634 /**
2635 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2636 * @adapter: board private structure to initialize
2637 **/
2638 static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2639 {
2640 struct e1000_hw *hw = &adapter->hw;
2641 u32 rctl;
2642
2643 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2644 /* enable VLAN receive filtering */
2645 rctl = er32(RCTL);
2646 rctl |= E1000_RCTL_VFE;
2647 rctl &= ~E1000_RCTL_CFIEN;
2648 ew32(RCTL, rctl);
2649 }
2650 }
2651
2652 /**
2653  * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2654 * @adapter: board private structure to initialize
2655 **/
2656 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
2657 {
2658 struct e1000_hw *hw = &adapter->hw;
2659 u32 ctrl;
2660
2661 /* disable VLAN tag insert/strip */
2662 ctrl = er32(CTRL);
2663 ctrl &= ~E1000_CTRL_VME;
2664 ew32(CTRL, ctrl);
2665 }
2666
2667 /**
2668 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2669 * @adapter: board private structure to initialize
2670 **/
2671 static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2672 {
2673 struct e1000_hw *hw = &adapter->hw;
2674 u32 ctrl;
2675
2676 /* enable VLAN tag insert/strip */
2677 ctrl = er32(CTRL);
2678 ctrl |= E1000_CTRL_VME;
2679 ew32(CTRL, ctrl);
2680 }
2681
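/**
 * e1000_update_mng_vlan - keep the manageability VLAN id in the filter table
 * @adapter: board private structure
 **/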
2682 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2683 {
2684 struct net_device *netdev = adapter->netdev;
2685 u16 vid = adapter->hw.mng_cookie.vlan_id;
2686 u16 old_vid = adapter->mng_vlan_id;
2687
2688 if (adapter->hw.mng_cookie.status &
2689 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2690 e1000_vlan_rx_add_vid(netdev, vid);
2691 adapter->mng_vlan_id = vid;
2692 }
2693
2694 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2695 e1000_vlan_rx_kill_vid(netdev, old_vid);
2696 }
2697
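/**
 * e1000_restore_vlan - reprogram the active VLAN filters after a reset
 * @adapter: board private structure
 **/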
2698 static void e1000_restore_vlan(struct e1000_adapter *adapter)
2699 {
2700 u16 vid;
2701
2702 e1000_vlan_rx_add_vid(adapter->netdev, 0);
2703
2704 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2705 e1000_vlan_rx_add_vid(adapter->netdev, vid);
2706 }
2707
2708 static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2709 {
2710 struct e1000_hw *hw = &adapter->hw;
2711 u32 manc, manc2h, mdef, i, j;
2712
2713 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2714 return;
2715
2716 manc = er32(MANC);
2717
2718 /*
2719 * enable receiving management packets to the host. this will probably
2720 * generate destination unreachable messages from the host OS, but
2721 * the packets will be handled on SMBUS
2722 */
2723 manc |= E1000_MANC_EN_MNG2HOST;
2724 manc2h = er32(MANC2H);
2725
2726 switch (hw->mac.type) {
2727 default:
2728 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2729 break;
2730 case e1000_82574:
2731 case e1000_82583:
2732 /*
2733 * Check if IPMI pass-through decision filter already exists;
2734 * if so, enable it.
2735 */
2736 for (i = 0, j = 0; i < 8; i++) {
2737 mdef = er32(MDEF(i));
2738
2739 /* Ignore filters with anything other than IPMI ports */
2740 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2741 continue;
2742
2743 /* Enable this decision filter in MANC2H */
2744 if (mdef)
2745 manc2h |= (1 << i);
2746
2747 j |= mdef;
2748 }
2749
2750 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2751 break;
2752
2753 /* Create new decision filter in an empty filter */
2754 for (i = 0, j = 0; i < 8; i++)
2755 if (er32(MDEF(i)) == 0) {
2756 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2757 E1000_MDEF_PORT_664));
2758 manc2h |= (1 << i); /* enable the newly created filter */
2759 j++;
2760 break;
2761 }
2762
2763 if (!j)
2764 e_warn("Unable to create IPMI pass-through filter\n");
2765 break;
2766 }
2767
2768 ew32(MANC2H, manc2h);
2769 ew32(MANC, manc);
2770 }
2771
2772 /**
2773 * e1000_configure_tx - Configure Transmit Unit after Reset
2774 * @adapter: board private structure
2775 *
2776 * Configure the Tx unit of the MAC after a reset.
2777 **/
2778 static void e1000_configure_tx(struct e1000_adapter *adapter)
2779 {
2780 struct e1000_hw *hw = &adapter->hw;
2781 struct e1000_ring *tx_ring = adapter->tx_ring;
2782 u64 tdba;
2783 u32 tdlen, tarc;
2784
2785 /* Setup the HW Tx Head and Tail descriptor pointers */
2786 tdba = tx_ring->dma;
2787 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2788 ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
2789 ew32(TDBAH, (tdba >> 32));
2790 ew32(TDLEN, tdlen);
2791 ew32(TDH, 0);
2792 ew32(TDT, 0);
2793 tx_ring->head = adapter->hw.hw_addr + E1000_TDH;
2794 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT;
2795
2796 /* Set the Tx Interrupt Delay register */
2797 ew32(TIDV, adapter->tx_int_delay);
2798 /* Tx irq moderation */
2799 ew32(TADV, adapter->tx_abs_int_delay);
2800
2801 if (adapter->flags2 & FLAG2_DMA_BURST) {
2802 u32 txdctl = er32(TXDCTL(0));
2803 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2804 E1000_TXDCTL_WTHRESH);
2805 /*
2806 * set up some performance related parameters to encourage the
2807 * hardware to use the bus more efficiently in bursts; this
2808 * depends on tx_int_delay being enabled:
2809 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
2810 * hthresh = 1 ==> prefetch when one or more available
2811 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2812 * BEWARE: this seems to work but should be considered first if
2813 * there are Tx hangs or other Tx related bugs
2814 */
2815 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2816 ew32(TXDCTL(0), txdctl);
2817 }
2818 /* erratum work around: set txdctl the same for both queues */
2819 /* erratum workaround: set txdctl the same for both queues */
2820
2821 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2822 tarc = er32(TARC(0));
2823 /*
2824 * set the speed mode bit, we'll clear it if we're not at
2825 * gigabit link later
2826 */
2827 #define SPEED_MODE_BIT (1 << 21)
2828 tarc |= SPEED_MODE_BIT;
2829 ew32(TARC(0), tarc);
2830 }
2831
2832 /* errata: program both queues to unweighted RR */
2833 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2834 tarc = er32(TARC(0));
2835 tarc |= 1;
2836 ew32(TARC(0), tarc);
2837 tarc = er32(TARC(1));
2838 tarc |= 1;
2839 ew32(TARC(1), tarc);
2840 }
2841
2842 /* Setup Transmit Descriptor Settings for eop descriptor */
2843 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2844
2845 /* only set IDE if we are delaying interrupts using the timers */
2846 if (adapter->tx_int_delay)
2847 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2848
2849 /* enable Report Status bit */
2850 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2851
2852 hw->mac.ops.config_collision_dist(hw);
2853 }
2854
2855 /**
2856 * e1000_setup_rctl - configure the receive control registers
2857 * @adapter: Board private structure
2858 **/
2859 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2860 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
2861 static void e1000_setup_rctl(struct e1000_adapter *adapter)
2862 {
2863 struct e1000_hw *hw = &adapter->hw;
2864 u32 rctl, rfctl;
2865 u32 pages = 0;
2866
2867 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2868 if (hw->mac.type == e1000_pch2lan) {
2869 s32 ret_val;
2870
2871 if (adapter->netdev->mtu > ETH_DATA_LEN)
2872 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2873 else
2874 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2875
2876 if (ret_val)
2877 e_dbg("failed to enable jumbo frame workaround mode\n");
2878 }
2879
2880 /* Program MC offset vector base */
2881 rctl = er32(RCTL);
2882 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2883 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2884 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2885 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2886
2887 /* Do not Store bad packets */
2888 rctl &= ~E1000_RCTL_SBP;
2889
2890 /* Enable Long Packet receive */
2891 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2892 rctl &= ~E1000_RCTL_LPE;
2893 else
2894 rctl |= E1000_RCTL_LPE;
2895
2896 /* Some systems expect that the CRC is included in SMBUS traffic. The
2897 * hardware strips the CRC before sending to both SMBUS (BMC) and to
2898 * host memory when this is enabled
2899 */
2900 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2901 rctl |= E1000_RCTL_SECRC;
2902
2903 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
2904 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
2905 u16 phy_data;
2906
2907 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
2908 phy_data &= 0xfff8;
2909 phy_data |= (1 << 2);
2910 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
2911
2912 e1e_rphy(hw, 22, &phy_data);
2913 phy_data &= 0x0fff;
2914 phy_data |= (1 << 14);
2915 e1e_wphy(hw, 0x10, 0x2823);
2916 e1e_wphy(hw, 0x11, 0x0003);
2917 e1e_wphy(hw, 22, phy_data);
2918 }
2919
2920 /* Setup buffer sizes */
2921 rctl &= ~E1000_RCTL_SZ_4096;
2922 rctl |= E1000_RCTL_BSEX;
2923 switch (adapter->rx_buffer_len) {
2924 case 2048:
2925 default:
2926 rctl |= E1000_RCTL_SZ_2048;
2927 rctl &= ~E1000_RCTL_BSEX;
2928 break;
2929 case 4096:
2930 rctl |= E1000_RCTL_SZ_4096;
2931 break;
2932 case 8192:
2933 rctl |= E1000_RCTL_SZ_8192;
2934 break;
2935 case 16384:
2936 rctl |= E1000_RCTL_SZ_16384;
2937 break;
2938 }
2939
2940 /* Enable Extended Status in all Receive Descriptors */
2941 rfctl = er32(RFCTL);
2942 rfctl |= E1000_RFCTL_EXTEN;
2943
2944 /*
2945 * 82571 and greater support packet-split where the protocol
2946 * header is placed in skb->data and the packet data is
2947 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2948 * In the case of a non-split, skb->data is linearly filled,
2949 * followed by the page buffers. Therefore, skb->data is
2950 * sized to hold the largest protocol header.
2951 *
2952 * allocations using alloc_page take too long for regular MTU
2953 * so only enable packet split for jumbo frames
2954 *
2955 * Using pages when the page size is greater than 16k wastes
2956 * a lot of memory, since we allocate 3 pages at all times
2957 * per packet.
2958 */
2959 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2960 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2961 adapter->rx_ps_pages = pages;
2962 else
2963 adapter->rx_ps_pages = 0;
2964
2965 if (adapter->rx_ps_pages) {
2966 u32 psrctl = 0;
2967
2968 /*
2969 * disable packet split support for IPv6 extension headers,
2970 * because some malformed IPv6 headers can hang the Rx
2971 */
2972 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2973 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2974
2975 /* Enable Packet split descriptors */
2976 rctl |= E1000_RCTL_DTYP_PS;
2977
2978 psrctl |= adapter->rx_ps_bsize0 >>
2979 E1000_PSRCTL_BSIZE0_SHIFT;
2980
2981 switch (adapter->rx_ps_pages) {
2982 case 3:
2983 psrctl |= PAGE_SIZE <<
2984 E1000_PSRCTL_BSIZE3_SHIFT;
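/* fall through */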
2985 case 2:
2986 psrctl |= PAGE_SIZE <<
2987 E1000_PSRCTL_BSIZE2_SHIFT;
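/* fall through */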
2988 case 1:
2989 psrctl |= PAGE_SIZE >>
2990 E1000_PSRCTL_BSIZE1_SHIFT;
2991 break;
2992 }
2993
2994 ew32(PSRCTL, psrctl);
2995 }
2996
2997 /* This is useful for sniffing bad packets. */
2998 if (adapter->netdev->features & NETIF_F_RXALL) {
2999 /* UPE and MPE will be handled by normal PROMISC logic
3000 * in e1000e_set_rx_mode */
3001 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3002 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3003 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3004
3005 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3006 E1000_RCTL_DPF | /* Allow filtered pause */
3007 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3008 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3009 * and that breaks VLANs.
3010 */
3011 }
3012
3013 ew32(RFCTL, rfctl);
3014 ew32(RCTL, rctl);
3015 /* just started the receive unit, no need to restart */
3016 adapter->flags &= ~FLAG_RX_RESTART_NOW;
3017 }
3018
3019 /**
3020 * e1000_configure_rx - Configure Receive Unit after Reset
3021 * @adapter: board private structure
3022 *
3023 * Configure the Rx unit of the MAC after a reset.
3024 **/
3025 static void e1000_configure_rx(struct e1000_adapter *adapter)
3026 {
3027 struct e1000_hw *hw = &adapter->hw;
3028 struct e1000_ring *rx_ring = adapter->rx_ring;
3029 u64 rdba;
3030 u32 rdlen, rctl, rxcsum, ctrl_ext;
3031
3032 if (adapter->rx_ps_pages) {
3033 /* this is a 32 byte descriptor */
3034 rdlen = rx_ring->count *
3035 sizeof(union e1000_rx_desc_packet_split);
3036 adapter->clean_rx = e1000_clean_rx_irq_ps;
3037 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
3038 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
3039 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3040 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
3041 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
3042 } else {
3043 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3044 adapter->clean_rx = e1000_clean_rx_irq;
3045 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
3046 }
3047
3048 /* disable receives while setting up the descriptors */
3049 rctl = er32(RCTL);
3050 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3051 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3052 e1e_flush();
3053 usleep_range(10000, 20000);
3054
3055 if (adapter->flags2 & FLAG2_DMA_BURST) {
3056 /*
3057 * set the writeback threshold (only takes effect if the RDTR
3058 * is set). set GRAN=1 and write back up to 0x4 worth, and
3059 * enable prefetching of 0x20 Rx descriptors
3060 * granularity = 01
3061 * wthresh = 04,
3062 * hthresh = 04,
3063 * pthresh = 0x20
3064 */
3065 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3066 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3067
3068 /*
3069 * override the delay timers for enabling bursting, only if
3070 * the value was not set by the user via module options
3071 */
3072 if (adapter->rx_int_delay == DEFAULT_RDTR)
3073 adapter->rx_int_delay = BURST_RDTR;
3074 if (adapter->rx_abs_int_delay == DEFAULT_RADV)
3075 adapter->rx_abs_int_delay = BURST_RADV;
3076 }
3077
3078 /* set the Receive Delay Timer Register */
3079 ew32(RDTR, adapter->rx_int_delay);
3080
3081 /* irq moderation */
3082 ew32(RADV, adapter->rx_abs_int_delay);
3083 if ((adapter->itr_setting != 0) && (adapter->itr != 0))
3084 ew32(ITR, 1000000000 / (adapter->itr * 256));
3085
3086 ctrl_ext = er32(CTRL_EXT);
3087 /* Auto-Mask interrupts upon ICR access */
3088 ctrl_ext |= E1000_CTRL_EXT_IAME;
3089 ew32(IAM, 0xffffffff);
3090 ew32(CTRL_EXT, ctrl_ext);
3091 e1e_flush();
3092
3093 /*
3094 * Setup the HW Rx Head and Tail Descriptor Pointers and
3095 * the Base and Length of the Rx Descriptor Ring
3096 */
3097 rdba = rx_ring->dma;
3098 ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
3099 ew32(RDBAH, (rdba >> 32));
3100 ew32(RDLEN, rdlen);
3101 ew32(RDH, 0);
3102 ew32(RDT, 0);
3103 rx_ring->head = adapter->hw.hw_addr + E1000_RDH;
3104 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT;
3105
3106 /* Enable Receive Checksum Offload for TCP and UDP */
3107 rxcsum = er32(RXCSUM);
3108 if (adapter->netdev->features & NETIF_F_RXCSUM)
3109 rxcsum |= E1000_RXCSUM_TUOFL;
3110 else
3111 rxcsum &= ~E1000_RXCSUM_TUOFL;
3112 ew32(RXCSUM, rxcsum);
3113
3114 if (adapter->hw.mac.type == e1000_pch2lan) {
3115 /*
3116 * With jumbo frames, excessive C-state transition
3117 * latencies result in dropped transactions.
3118 */
3119 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3120 u32 rxdctl = er32(RXDCTL(0));
3121 ew32(RXDCTL(0), rxdctl | 0x3);
3122 pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
3123 } else {
3124 pm_qos_update_request(&adapter->netdev->pm_qos_req,
3125 PM_QOS_DEFAULT_VALUE);
3126 }
3127 }
3128
3129 /* Enable Receives */
3130 ew32(RCTL, rctl);
3131 }
3132
3133 /**
3134 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3135 * @netdev: network interface device structure
3136 *
3137 * Writes multicast address list to the MTA hash table.
3138 * Returns: -ENOMEM on failure
3139 * 0 on no addresses written
3140 * X on writing X addresses to MTA
3141 */
3142 static int e1000e_write_mc_addr_list(struct net_device *netdev)
3143 {
3144 struct e1000_adapter *adapter = netdev_priv(netdev);
3145 struct e1000_hw *hw = &adapter->hw;
3146 struct netdev_hw_addr *ha;
3147 u8 *mta_list;
3148 int i;
3149
3150 if (netdev_mc_empty(netdev)) {
3151 /* nothing to program, so clear mc list */
3152 hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
3153 return 0;
3154 }
3155
3156 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3157 if (!mta_list)
3158 return -ENOMEM;
3159
3160 /* update_mc_addr_list expects a packed array of only addresses. */
3161 i = 0;
3162 netdev_for_each_mc_addr(ha, netdev)
3163 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3164
3165 hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3166 kfree(mta_list);
3167
3168 return netdev_mc_count(netdev);
3169 }
3170
3171 /**
3172 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3173 * @netdev: network interface device structure
3174 *
3175 * Writes unicast address list to the RAR table.
3176 * Returns: -ENOMEM on failure/insufficient address space
3177 * 0 on no addresses written
3178 * X on writing X addresses to the RAR table
3179 **/
3180 static int e1000e_write_uc_addr_list(struct net_device *netdev)
3181 {
3182 struct e1000_adapter *adapter = netdev_priv(netdev);
3183 struct e1000_hw *hw = &adapter->hw;
3184 unsigned int rar_entries = hw->mac.rar_entry_count;
3185 int count = 0;
3186
3187 /* save a rar entry for our hardware address */
3188 rar_entries--;
3189
3190 /* save a rar entry for the LAA workaround */
3191 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
3192 rar_entries--;
3193
3194 /* return -ENOMEM to indicate insufficient space for the addresses */
3195 if (netdev_uc_count(netdev) > rar_entries)
3196 return -ENOMEM;
3197
3198 if (!netdev_uc_empty(netdev) && rar_entries) {
3199 struct netdev_hw_addr *ha;
3200
3201 /*
3202 * write the addresses in reverse order to avoid write
3203 * combining
3204 */
3205 netdev_for_each_uc_addr(ha, netdev) {
3206 if (!rar_entries)
3207 break;
3208 e1000e_rar_set(hw, ha->addr, rar_entries--);
3209 count++;
3210 }
3211 }
3212
3213 /* zero out the remaining RAR entries not used above */
3214 for (; rar_entries > 0; rar_entries--) {
3215 ew32(RAH(rar_entries), 0);
3216 ew32(RAL(rar_entries), 0);
3217 }
3218 e1e_flush();
3219
3220 return count;
3221 }
3222
3223 /**
3224 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
3225 * @netdev: network interface device structure
3226 *
3227 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
3228 * address list or the network interface flags are updated. This routine is
3229 * responsible for configuring the hardware for proper unicast, multicast,
3230 * promiscuous mode, and all-multi behavior.
3231 **/
3232 static void e1000e_set_rx_mode(struct net_device *netdev)
3233 {
3234 struct e1000_adapter *adapter = netdev_priv(netdev);
3235 struct e1000_hw *hw = &adapter->hw;
3236 u32 rctl;
3237
3238 /* Check for Promiscuous and All Multicast modes */
3239 rctl = er32(RCTL);
3240
3241 /* clear the affected bits */
3242 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3243
3244 if (netdev->flags & IFF_PROMISC) {
3245 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3246 /* Do not hardware filter VLANs in promisc mode */
3247 e1000e_vlan_filter_disable(adapter);
3248 } else {
3249 int count;
3250
3251 if (netdev->flags & IFF_ALLMULTI) {
3252 rctl |= E1000_RCTL_MPE;
3253 } else {
3254 /*
3255 * Write addresses to the MTA, if the attempt fails
3256 * then we should just turn on promiscuous mode so
3257 * that we can at least receive multicast traffic
3258 */
3259 count = e1000e_write_mc_addr_list(netdev);
3260 if (count < 0)
3261 rctl |= E1000_RCTL_MPE;
3262 }
3263 e1000e_vlan_filter_enable(adapter);
3264 /*
3265 * Write addresses to available RAR registers, if there is not
3266 * sufficient space to store all the addresses then enable
3267 * unicast promiscuous mode
3268 */
3269 count = e1000e_write_uc_addr_list(netdev);
3270 if (count < 0)
3271 rctl |= E1000_RCTL_UPE;
3272 }
3273
3274 ew32(RCTL, rctl);
3275
3276 if (netdev->features & NETIF_F_HW_VLAN_RX)
3277 e1000e_vlan_strip_enable(adapter);
3278 else
3279 e1000e_vlan_strip_disable(adapter);
3280 }
3281
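/**
 * e1000e_setup_rss_hash - configure the RSS hash key and redirection table
 * @adapter: board private structure
 **/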
3282 static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
3283 {
3284 struct e1000_hw *hw = &adapter->hw;
3285 u32 mrqc, rxcsum;
3286 int i;
3287 static const u32 rsskey[10] = {
3288 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
3289 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
3290 };
3291
3292 /* Fill out hash function seed */
3293 for (i = 0; i < 10; i++)
3294 ew32(RSSRK(i), rsskey[i]);
3295
3296 /* Direct all traffic to queue 0 */
3297 for (i = 0; i < 32; i++)
3298 ew32(RETA(i), 0);
3299
3300 /*
3301 * Disable raw packet checksumming so that RSS hash is placed in
3302 * descriptor on writeback.
3303 */
3304 rxcsum = er32(RXCSUM);
3305 rxcsum |= E1000_RXCSUM_PCSD;
3306
3307 ew32(RXCSUM, rxcsum);
3308
3309 mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
3310 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3311 E1000_MRQC_RSS_FIELD_IPV6 |
3312 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3313 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
3314
3315 ew32(MRQC, mrqc);
3316 }
3317
3318 /**
3319 * e1000_configure - configure the hardware for Rx and Tx
3320 * @adapter: private board structure
3321 **/
3322 static void e1000_configure(struct e1000_adapter *adapter)
3323 {
3324 struct e1000_ring *rx_ring = adapter->rx_ring;
3325
3326 e1000e_set_rx_mode(adapter->netdev);
3327
3328 e1000_restore_vlan(adapter);
3329 e1000_init_manageability_pt(adapter);
3330
3331 e1000_configure_tx(adapter);
3332
3333 if (adapter->netdev->features & NETIF_F_RXHASH)
3334 e1000e_setup_rss_hash(adapter);
3335 e1000_setup_rctl(adapter);
3336 e1000_configure_rx(adapter);
3337 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
3338 }
3339
3340 /**
3341 * e1000e_power_up_phy - restore link in case the phy was powered down
3342 * @adapter: address of board private structure
3343 *
3344 * The phy may be powered down to save power and turn off link when the
3345 * driver is unloaded and wake on lan is not enabled (among others)
3346 * *** this routine MUST be followed by a call to e1000e_reset ***
3347 **/
3348 void e1000e_power_up_phy(struct e1000_adapter *adapter)
3349 {
3350 if (adapter->hw.phy.ops.power_up)
3351 adapter->hw.phy.ops.power_up(&adapter->hw);
3352
3353 adapter->hw.mac.ops.setup_link(&adapter->hw);
3354 }
3355
3356 /**
3357 * e1000_power_down_phy - Power down the PHY
3358 *
3359 * Power down the PHY so no link is implied when interface is down.
3360 * The PHY cannot be powered down if management or WoL is active.
3361 */
3362 static void e1000_power_down_phy(struct e1000_adapter *adapter)
3363 {
3364 /* WoL is enabled */
3365 if (adapter->wol)
3366 return;
3367
3368 if (adapter->hw.phy.ops.power_down)
3369 adapter->hw.phy.ops.power_down(&adapter->hw);
3370 }
3371
3372 /**
3373 * e1000e_reset - bring the hardware into a known good state
3374 *
3375 * This function boots the hardware and enables some settings that
3376 * require a configuration cycle of the hardware - those cannot be
3377 * set/changed during runtime. After reset the device needs to be
3378 * properly configured for Rx, Tx etc.
3379 */
3380 void e1000e_reset(struct e1000_adapter *adapter)
3381 {
3382 struct e1000_mac_info *mac = &adapter->hw.mac;
3383 struct e1000_fc_info *fc = &adapter->hw.fc;
3384 struct e1000_hw *hw = &adapter->hw;
3385 u32 tx_space, min_tx_space, min_rx_space;
3386 u32 pba = adapter->pba;
3387 u16 hwm;
3388
3389 /* reset Packet Buffer Allocation to default */
3390 ew32(PBA, pba);
3391
3392 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
3393 /*
3394 * To maintain wire speed transmits, the Tx FIFO should be
3395 * large enough to accommodate two full transmit packets,
3396 * rounded up to the next 1KB and expressed in KB. Likewise,
3397 * the Rx FIFO should be large enough to accommodate at least
3398 * one full receive packet and is similarly rounded up and
3399 * expressed in KB.
3400 */
3401 pba = er32(PBA);
3402 /* upper 16 bits has Tx packet buffer allocation size in KB */
3403 tx_space = pba >> 16;
3404 /* lower 16 bits has Rx packet buffer allocation size in KB */
3405 pba &= 0xffff;
3406 /*
3407 * the Tx FIFO also stores 16 bytes of information about each Tx
3408 * packet; don't include the Ethernet FCS because hardware appends it
3409 */
3410 min_tx_space = (adapter->max_frame_size +
3411 sizeof(struct e1000_tx_desc) -
3412 ETH_FCS_LEN) * 2;
3413 min_tx_space = ALIGN(min_tx_space, 1024);
3414 min_tx_space >>= 10;
3415 /* software strips receive CRC, so leave room for it */
3416 min_rx_space = adapter->max_frame_size;
3417 min_rx_space = ALIGN(min_rx_space, 1024);
3418 min_rx_space >>= 10;
3419
3420 /*
3421 * If current Tx allocation is less than the min Tx FIFO size,
3422 * and the min Tx FIFO size is less than the current Rx FIFO
3423 * allocation, take space away from current Rx allocation
3424 */
3425 if ((tx_space < min_tx_space) &&
3426 ((min_tx_space - tx_space) < pba)) {
3427 pba -= min_tx_space - tx_space;
3428
3429 /*
3430 * if short on Rx space, Rx wins and must trump Tx
3431 * adjustment or use Early Receive if available
3432 */
3433 if (pba < min_rx_space)
3434 pba = min_rx_space;
3435 }
3436
3437 ew32(PBA, pba);
3438 }
3439
3440 /*
3441 * flow control settings
3442 *
3443 * The high water mark must be low enough to fit one full frame
3444 * (or the size used for early receive) above it in the Rx FIFO.
3445 * Set it to the lower of:
3446 * - 90% of the Rx FIFO size, and
3447 * - the full Rx FIFO size minus one full frame
3448 */
3449 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3450 fc->pause_time = 0xFFFF;
3451 else
3452 fc->pause_time = E1000_FC_PAUSE_TIME;
3453 fc->send_xon = true;
3454 fc->current_mode = fc->requested_mode;
3455
3456 switch (hw->mac.type) {
3457 case e1000_ich9lan:
3458 case e1000_ich10lan:
3459 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3460 pba = 14;
3461 ew32(PBA, pba);
3462 fc->high_water = 0x2800;
3463 fc->low_water = fc->high_water - 8;
3464 break;
3465 }
3466 /* fall-through */
3467 default:
3468 hwm = min(((pba << 10) * 9 / 10),
3469 ((pba << 10) - adapter->max_frame_size));
3470
3471 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3472 fc->low_water = fc->high_water - 8;
3473 break;
3474 case e1000_pchlan:
3475 /*
3476 * Workaround PCH LOM adapter hangs with certain network
3477 * loads. If hangs persist, try disabling Tx flow control.
3478 */
3479 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3480 fc->high_water = 0x3500;
3481 fc->low_water = 0x1500;
3482 } else {
3483 fc->high_water = 0x5000;
3484 fc->low_water = 0x3000;
3485 }
3486 fc->refresh_time = 0x1000;
3487 break;
3488 case e1000_pch2lan:
3489 fc->high_water = 0x05C20;
3490 fc->low_water = 0x05048;
3491 fc->pause_time = 0x0650;
3492 fc->refresh_time = 0x0400;
3493 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3494 pba = 14;
3495 ew32(PBA, pba);
3496 }
3497 break;
3498 }
3499
3500 /*
3501 * Alignment of Tx data is on an arbitrary byte boundary with the
3502 * maximum size per Tx descriptor limited only to the transmit
3503 * allocation of the packet buffer minus 96 bytes with an upper
3504 * limit of 24KB due to receive synchronization limitations.
3505 */
3506 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
3507 24 << 10);
3508
3509 /*
3510 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
3511 * fit in receive buffer.
3512 */
3513 if (adapter->itr_setting & 0x3) {
3514 if ((adapter->max_frame_size * 2) > (pba << 10)) {
3515 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3516 dev_info(&adapter->pdev->dev,
3517 "Interrupt Throttle Rate turned off\n");
3518 adapter->flags2 |= FLAG2_DISABLE_AIM;
3519 ew32(ITR, 0);
3520 }
3521 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3522 dev_info(&adapter->pdev->dev,
3523 "Interrupt Throttle Rate turned on\n");
3524 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3525 adapter->itr = 20000;
3526 ew32(ITR, 1000000000 / (adapter->itr * 256));
3527 }
3528 }
3529
3530 /* Allow time for pending master requests to run */
3531 mac->ops.reset_hw(hw);
3532
3533 /*
3534 * For parts with AMT enabled, let the firmware know
3535 * that the network interface is in control
3536 */
3537 if (adapter->flags & FLAG_HAS_AMT)
3538 e1000e_get_hw_control(adapter);
3539
3540 ew32(WUC, 0);
3541
3542 if (mac->ops.init_hw(hw))
3543 e_err("Hardware Error\n");
3544
3545 e1000_update_mng_vlan(adapter);
3546
3547 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
3548 ew32(VET, ETH_P_8021Q);
3549
3550 e1000e_reset_adaptive(hw);
3551
3552 if (!netif_running(adapter->netdev) &&
3553 !test_bit(__E1000_TESTING, &adapter->state)) {
3554 e1000_power_down_phy(adapter);
3555 return;
3556 }
3557
3558 e1000_get_phy_info(hw);
3559
3560 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3561 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
3562 u16 phy_data = 0;
3563 /*
3564 * speed up time to link by disabling smart power down, ignore
3565 * the return value of this function because there is nothing
3566 * different we would do if it failed
3567 */
3568 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3569 phy_data &= ~IGP02E1000_PM_SPD;
3570 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3571 }
3572 }
3573
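/**
 * e1000e_up - reconfigure and restart the adapter after a reset
 * @adapter: board private structure
 **/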
3574 int e1000e_up(struct e1000_adapter *adapter)
3575 {
3576 struct e1000_hw *hw = &adapter->hw;
3577
3578 /* hardware has been reset, we need to reload some things */
3579 e1000_configure(adapter);
3580
3581 clear_bit(__E1000_DOWN, &adapter->state);
3582
3583 if (adapter->msix_entries)
3584 e1000_configure_msix(adapter);
3585 e1000_irq_enable(adapter);
3586
3587 netif_start_queue(adapter->netdev);
3588
3589 /* fire a link change interrupt to start the watchdog */
3590 if (adapter->msix_entries)
3591 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3592 else
3593 ew32(ICS, E1000_ICS_LSC);
3594
3595 return 0;
3596 }
3597
3598 static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3599 {
3600 struct e1000_hw *hw = &adapter->hw;
3601
3602 if (!(adapter->flags2 & FLAG2_DMA_BURST))
3603 return;
3604
3605 /* flush pending descriptor writebacks to memory */
3606 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3607 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3608
3609 /* execute the writes immediately */
3610 e1e_flush();
3611
3612 /*
3613 * due to rare timing issues, write to TIDV/RDTR again to ensure the
3614 * write is successful
3615 */
3616 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3617 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3618
3619 /* execute the writes immediately */
3620 e1e_flush();
3621 }
3622
3623 static void e1000e_update_stats(struct e1000_adapter *adapter);
3624
3625 void e1000e_down(struct e1000_adapter *adapter)
3626 {
3627 struct net_device *netdev = adapter->netdev;
3628 struct e1000_hw *hw = &adapter->hw;
3629 u32 tctl, rctl;
3630
3631 /*
3632 * signal that we're down so the interrupt handler does not
3633 * reschedule our watchdog timer
3634 */
3635 set_bit(__E1000_DOWN, &adapter->state);
3636
3637 /* disable receives in the hardware */
3638 rctl = er32(RCTL);
3639 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3640 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3641 /* flush and sleep below */
3642
3643 netif_stop_queue(netdev);
3644
3645 /* disable transmits in the hardware */
3646 tctl = er32(TCTL);
3647 tctl &= ~E1000_TCTL_EN;
3648 ew32(TCTL, tctl);
3649
3650 /* flush both disables and wait for them to finish */
3651 e1e_flush();
3652 usleep_range(10000, 20000);
3653
3654 e1000_irq_disable(adapter);
3655
3656 del_timer_sync(&adapter->watchdog_timer);
3657 del_timer_sync(&adapter->phy_info_timer);
3658
3659 netif_carrier_off(netdev);
3660
3661 spin_lock(&adapter->stats64_lock);
3662 e1000e_update_stats(adapter);
3663 spin_unlock(&adapter->stats64_lock);
3664
3665 e1000e_flush_descriptors(adapter);
3666 e1000_clean_tx_ring(adapter->tx_ring);
3667 e1000_clean_rx_ring(adapter->rx_ring);
3668
3669 adapter->link_speed = 0;
3670 adapter->link_duplex = 0;
3671
3672 if (!pci_channel_offline(adapter->pdev))
3673 e1000e_reset(adapter);
3674
3675 /*
3676 * TODO: for power management, we could drop the link and
3677 * pci_disable_device here.
3678 */
3679 }
3680
3681 void e1000e_reinit_locked(struct e1000_adapter *adapter)
3682 {
3683 might_sleep();
3684 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
3685 usleep_range(1000, 2000);
3686 e1000e_down(adapter);
3687 e1000e_up(adapter);
3688 clear_bit(__E1000_RESETTING, &adapter->state);
3689 }
3690
3691 /**
3692 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
3693 * @adapter: board private structure to initialize
3694 *
3695 * e1000_sw_init initializes the Adapter private data structure.
3696 * Fields are initialized based on PCI device information and
3697 * OS network device settings (MTU size).
3698 **/
3699 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
3700 {
3701 struct net_device *netdev = adapter->netdev;
3702
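/* default Rx buffer: 1514-byte frame + 4-byte VLAN tag + 4-byte FCS = 1522 bytes */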
3703 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
3704 adapter->rx_ps_bsize0 = 128;
3705 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3706 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3707 adapter->tx_ring_count = E1000_DEFAULT_TXD;
3708 adapter->rx_ring_count = E1000_DEFAULT_RXD;
3709
3710 spin_lock_init(&adapter->stats64_lock);
3711
3712 e1000e_set_interrupt_capability(adapter);
3713
3714 if (e1000_alloc_queues(adapter))
3715 return -ENOMEM;
3716
3717 /* Explicitly disable IRQ since the NIC can be in any state. */
3718 e1000_irq_disable(adapter);
3719
3720 set_bit(__E1000_DOWN, &adapter->state);
3721 return 0;
3722 }
3723
3724 /**
3725 * e1000_intr_msi_test - Interrupt Handler
3726 * @irq: interrupt number
3727 * @data: pointer to a network interface device structure
3728 **/
3729 static irqreturn_t e1000_intr_msi_test(int irq, void *data)
3730 {
3731 struct net_device *netdev = data;
3732 struct e1000_adapter *adapter = netdev_priv(netdev);
3733 struct e1000_hw *hw = &adapter->hw;
3734 u32 icr = er32(ICR);
3735
3736 e_dbg("icr is %08X\n", icr);
3737 if (icr & E1000_ICR_RXSEQ) {
3738 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
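/* make the flag update visible before the rmb() in e1000_test_msi_interrupt() */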
3739 wmb();
3740 }
3741
3742 return IRQ_HANDLED;
3743 }
3744
3745 /**
3746 * e1000_test_msi_interrupt - Returns 0 for successful test
3747 * @adapter: board private struct
3748 *
3749 * code flow taken from tg3.c
3750 **/
3751 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3752 {
3753 struct net_device *netdev = adapter->netdev;
3754 struct e1000_hw *hw = &adapter->hw;
3755 int err;
3756
3757 /* poll_enable hasn't been called yet, so there is nothing to disable */
3758 /* clear any pending events */
3759 er32(ICR);
3760
3761 /* free the real vector and request a test handler */
3762 e1000_free_irq(adapter);
3763 e1000e_reset_interrupt_capability(adapter);
3764
3765 /* Assume the test fails; if it succeeds, the test
3766 * MSI irq handler will clear this flag */
3767 adapter->flags |= FLAG_MSI_TEST_FAILED;
3768
3769 err = pci_enable_msi(adapter->pdev);
3770 if (err)
3771 goto msi_test_failed;
3772
3773 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
3774 netdev->name, netdev);
3775 if (err) {
3776 pci_disable_msi(adapter->pdev);
3777 goto msi_test_failed;
3778 }
3779
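/* ensure FLAG_MSI_TEST_FAILED is visible before the test interrupt can fire */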
3780 wmb();
3781
3782 e1000_irq_enable(adapter);
3783
3784 /* fire an unusual interrupt on the test handler */
3785 ew32(ICS, E1000_ICS_RXSEQ);
3786 e1e_flush();
3787 msleep(100);
3788
3789 e1000_irq_disable(adapter);
3790
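/* pairs with the wmb() in e1000_intr_msi_test() after it clears the flag */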
3791 rmb();
3792
3793 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
3794 adapter->int_mode = E1000E_INT_MODE_LEGACY;
3795 e_info("MSI interrupt test failed, using legacy interrupt.\n");
3796 } else {
3797 e_dbg("MSI interrupt test succeeded!\n");
3798 }
3799
3800 free_irq(adapter->pdev->irq, netdev);
3801 pci_disable_msi(adapter->pdev);
3802
3803 msi_test_failed:
3804 e1000e_set_interrupt_capability(adapter);
3805 return e1000_request_irq(adapter);
3806 }
3807
3808 /**
3809 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
3810 * @adapter: board private struct
3811 *
3812 * code flow taken from tg3.c, called with e1000 interrupts disabled.
3813 **/
3814 static int e1000_test_msi(struct e1000_adapter *adapter)
3815 {
3816 int err;
3817 u16 pci_cmd;
3818
3819 if (!(adapter->flags & FLAG_MSI_ENABLED))
3820 return 0;
3821
3822 /* disable SERR in case the MSI write causes a master abort */
3823 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3824 if (pci_cmd & PCI_COMMAND_SERR)
3825 pci_write_config_word(adapter->pdev, PCI_COMMAND,
3826 pci_cmd & ~PCI_COMMAND_SERR);
3827
3828 err = e1000_test_msi_interrupt(adapter);
3829
3830 /* re-enable SERR */
3831 if (pci_cmd & PCI_COMMAND_SERR) {
3832 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3833 pci_cmd |= PCI_COMMAND_SERR;
3834 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
3835 }
3836
3837 return err;
3838 }
3839
3840 /**
3841 * e1000_open - Called when a network interface is made active
3842 * @netdev: network interface device structure
3843 *
3844 * Returns 0 on success, negative value on failure
3845 *
3846 * The open entry point is called when a network interface is made
3847 * active by the system (IFF_UP). At this point all resources needed
3848 * for transmit and receive operations are allocated, the interrupt
3849 * handler is registered with the OS, the watchdog timer is started,
3850 * and the stack is notified that the interface is ready.
3851 **/
3852 static int e1000_open(struct net_device *netdev)
3853 {
3854 struct e1000_adapter *adapter = netdev_priv(netdev);
3855 struct e1000_hw *hw = &adapter->hw;
3856 struct pci_dev *pdev = adapter->pdev;
3857 int err;
3858
3859 /* disallow open during test */
3860 if (test_bit(__E1000_TESTING, &adapter->state))
3861 return -EBUSY;
3862
3863 pm_runtime_get_sync(&pdev->dev);
3864
3865 netif_carrier_off(netdev);
3866
3867 /* allocate transmit descriptors */
3868 err = e1000e_setup_tx_resources(adapter->tx_ring);
3869 if (err)
3870 goto err_setup_tx;
3871
3872 /* allocate receive descriptors */
3873 err = e1000e_setup_rx_resources(adapter->rx_ring);
3874 if (err)
3875 goto err_setup_rx;
3876
3877 /*
3878 * If AMT is enabled, let the firmware know that the network
3879 * interface is now open and reset the part to a known state.
3880 */
3881 if (adapter->flags & FLAG_HAS_AMT) {
3882 e1000e_get_hw_control(adapter);
3883 e1000e_reset(adapter);
3884 }
3885
3886 e1000e_power_up_phy(adapter);
3887
3888 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3889 if ((adapter->hw.mng_cookie.status &
3890 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3891 e1000_update_mng_vlan(adapter);
3892
3893 /* DMA latency requirement to work around a jumbo-frame issue */
3894 if (adapter->hw.mac.type == e1000_pch2lan)
3895 pm_qos_add_request(&adapter->netdev->pm_qos_req,
3896 PM_QOS_CPU_DMA_LATENCY,
3897 PM_QOS_DEFAULT_VALUE);
3898
3899 /*
3900 * before we allocate an interrupt, we must be ready to handle it.
3901 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3902 * as soon as we call pci_request_irq, so we have to set up our
3903 * clean_rx handler before we do so.
3904 */
3905 e1000_configure(adapter);
3906
3907 err = e1000_request_irq(adapter);
3908 if (err)
3909 goto err_req_irq;
3910
3911 /*
3912 * Work around PCIe errata with MSI interrupts causing some chipsets to
3913 * ignore e1000e MSI messages, which means we need to test our MSI
3914 * interrupt now
3915 */
3916 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
3917 err = e1000_test_msi(adapter);
3918 if (err) {
3919 e_err("Interrupt allocation failed\n");
3920 goto err_req_irq;
3921 }
3922 }
3923
3924 /* From here on the code is the same as e1000e_up() */
3925 clear_bit(__E1000_DOWN, &adapter->state);
3926
3927 napi_enable(&adapter->napi);
3928
3929 e1000_irq_enable(adapter);
3930
3931 adapter->tx_hang_recheck = false;
3932 netif_start_queue(netdev);
3933
3934 adapter->idle_check = true;
3935 pm_runtime_put(&pdev->dev);
3936
3937 /* fire a link status change interrupt to start the watchdog */
3938 if (adapter->msix_entries)
3939 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3940 else
3941 ew32(ICS, E1000_ICS_LSC);
3942
3943 return 0;
3944
3945 err_req_irq:
3946 e1000e_release_hw_control(adapter);
3947 e1000_power_down_phy(adapter);
3948 e1000e_free_rx_resources(adapter->rx_ring);
3949 err_setup_rx:
3950 e1000e_free_tx_resources(adapter->tx_ring);
3951 err_setup_tx:
3952 e1000e_reset(adapter);
3953 pm_runtime_put_sync(&pdev->dev);
3954
3955 return err;
3956 }
3957
3958 /**
3959 * e1000_close - Disables a network interface
3960 * @netdev: network interface device structure
3961 *
3962 * Returns 0, this is not allowed to fail
3963 *
3964 * The close entry point is called when an interface is de-activated
3965 * by the OS. The hardware is still under the drivers control, but
3966 * needs to be disabled. A global MAC reset is issued to stop the
3967 * hardware, and all transmit and receive resources are freed.
3968 **/
3969 static int e1000_close(struct net_device *netdev)
3970 {
3971 struct e1000_adapter *adapter = netdev_priv(netdev);
3972 struct pci_dev *pdev = adapter->pdev;
3973 int count = E1000_CHECK_RESET_COUNT;
3974
3975 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
3976 usleep_range(10000, 20000);
3977
3978 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3979
3980 pm_runtime_get_sync(&pdev->dev);
3981
3982 napi_disable(&adapter->napi);
3983
3984 if (!test_bit(__E1000_DOWN, &adapter->state)) {
3985 e1000e_down(adapter);
3986 e1000_free_irq(adapter);
3987 }
3988 e1000_power_down_phy(adapter);
3989
3990 e1000e_free_tx_resources(adapter->tx_ring);
3991 e1000e_free_rx_resources(adapter->rx_ring);
3992
3993 /*
3994 * kill manageability vlan ID if supported, but not if a vlan with
3995 * the same ID is registered on the host OS (let 8021q kill it)
3996 */
3997 if (adapter->hw.mng_cookie.status &
3998 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
3999 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4000
4001 /*
4002 * If AMT is enabled, let the firmware know that the network
4003 * interface is now closed
4004 */
4005 if ((adapter->flags & FLAG_HAS_AMT) &&
4006 !test_bit(__E1000_TESTING, &adapter->state))
4007 e1000e_release_hw_control(adapter);
4008
4009 if (adapter->hw.mac.type == e1000_pch2lan)
4010 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
4011
4012 pm_runtime_put_sync(&pdev->dev);
4013
4014 return 0;
4015 }
4016 /**
4017 * e1000_set_mac - Change the Ethernet Address of the NIC
4018 * @netdev: network interface device structure
4019 * @p: pointer to an address structure
4020 *
4021 * Returns 0 on success, negative on failure
4022 **/
4023 static int e1000_set_mac(struct net_device *netdev, void *p)
4024 {
4025 struct e1000_adapter *adapter = netdev_priv(netdev);
4026 struct sockaddr *addr = p;
4027
4028 if (!is_valid_ether_addr(addr->sa_data))
4029 return -EADDRNOTAVAIL;
4030
4031 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4032 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4033
4034 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
4035
4036 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4037 /* activate the workaround */
4038 e1000e_set_laa_state_82571(&adapter->hw, 1);
4039
4040 /*
4041 * Hold a copy of the LAA in RAR[14]. This is done so that
4042 * between the time RAR[0] gets clobbered and the time it
4043 * gets fixed (in e1000_watchdog), the actual LAA is in one
4044 * of the RARs and no incoming packets directed to this port
4045 * are dropped. Eventually the LAA will be in RAR[0] and
4046 * RAR[14].
4047 */
4048 e1000e_rar_set(&adapter->hw,
4049 adapter->hw.mac.addr,
4050 adapter->hw.mac.rar_entry_count - 1);
4051 }
4052
4053 return 0;
4054 }
4055
4056 /**
4057 * e1000e_update_phy_task - work thread to update phy
4058 * @work: pointer to our work struct
4059 *
4060 * this worker thread exists because reading the phy requires
4061 * acquiring a semaphore, which may msleep while waiting, and
4062 * we can't msleep in a timer.
4063 **/
4064 static void e1000e_update_phy_task(struct work_struct *work)
4065 {
4066 struct e1000_adapter *adapter = container_of(work,
4067 struct e1000_adapter, update_phy_task);
4068
4069 if (test_bit(__E1000_DOWN, &adapter->state))
4070 return;
4071
4072 e1000_get_phy_info(&adapter->hw);
4073 }
4074
4075 /*
4076 * Need to wait a few seconds after link up to get diagnostic information from
4077 * the phy
4078 */
4079 static void e1000_update_phy_info(unsigned long data)
4080 {
4081 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
4082
4083 if (test_bit(__E1000_DOWN, &adapter->state))
4084 return;
4085
4086 schedule_work(&adapter->update_phy_task);
4087 }
4088
4089 /**
4090 * e1000e_update_phy_stats - Update the PHY statistics counters
4091 * @adapter: board private structure
4092 *
4093 * Read/clear the upper 16-bit PHY registers and read/accumulate lower
4094 **/
4095 static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
4096 {
4097 struct e1000_hw *hw = &adapter->hw;
4098 s32 ret_val;
4099 u16 phy_data;
4100
4101 ret_val = hw->phy.ops.acquire(hw);
4102 if (ret_val)
4103 return;
4104
4105 /*
4106 * Setting the page is expensive, so check if we are already on the
4107 * desired page; if not, switch to the page with the PHY status registers.
4108 */
4109 hw->phy.addr = 1;
4110 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4111 &phy_data);
4112 if (ret_val)
4113 goto release;
4114 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
4115 ret_val = hw->phy.ops.set_page(hw,
4116 HV_STATS_PAGE << IGP_PAGE_SHIFT);
4117 if (ret_val)
4118 goto release;
4119 }
4120
4121 /* Single Collision Count */
4122 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4123 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4124 if (!ret_val)
4125 adapter->stats.scc += phy_data;
4126
4127 /* Excessive Collision Count */
4128 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4129 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4130 if (!ret_val)
4131 adapter->stats.ecol += phy_data;
4132
4133 /* Multiple Collision Count */
4134 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4135 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4136 if (!ret_val)
4137 adapter->stats.mcc += phy_data;
4138
4139 /* Late Collision Count */
4140 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4141 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4142 if (!ret_val)
4143 adapter->stats.latecol += phy_data;
4144
4145 /* Collision Count - also used for adaptive IFS */
4146 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4147 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4148 if (!ret_val)
4149 hw->mac.collision_delta = phy_data;
4150
4151 /* Defer Count */
4152 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4153 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4154 if (!ret_val)
4155 adapter->stats.dc += phy_data;
4156
4157 /* Transmit with no CRS */
4158 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4159 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4160 if (!ret_val)
4161 adapter->stats.tncrs += phy_data;
4162
4163 release:
4164 hw->phy.ops.release(hw);
4165 }
4166
4167 /**
4168 * e1000e_update_stats - Update the board statistics counters
4169 * @adapter: board private structure
4170 **/
4171 static void e1000e_update_stats(struct e1000_adapter *adapter)
4172 {
4173 struct net_device *netdev = adapter->netdev;
4174 struct e1000_hw *hw = &adapter->hw;
4175 struct pci_dev *pdev = adapter->pdev;
4176
4177 /*
4178 * Prevent stats update while adapter is being reset, or if the pci
4179 * connection is down.
4180 */
4181 if (adapter->link_speed == 0)
4182 return;
4183 if (pci_channel_offline(pdev))
4184 return;
4185
4186 adapter->stats.crcerrs += er32(CRCERRS);
4187 adapter->stats.gprc += er32(GPRC);
4188 adapter->stats.gorc += er32(GORCL);
4189 er32(GORCH); /* Clear gorc */
4190 adapter->stats.bprc += er32(BPRC);
4191 adapter->stats.mprc += er32(MPRC);
4192 adapter->stats.roc += er32(ROC);
4193
4194 adapter->stats.mpc += er32(MPC);
4195
4196 /* Half-duplex statistics */
4197 if (adapter->link_duplex == HALF_DUPLEX) {
4198 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4199 e1000e_update_phy_stats(adapter);
4200 } else {
4201 adapter->stats.scc += er32(SCC);
4202 adapter->stats.ecol += er32(ECOL);
4203 adapter->stats.mcc += er32(MCC);
4204 adapter->stats.latecol += er32(LATECOL);
4205 adapter->stats.dc += er32(DC);
4206
4207 hw->mac.collision_delta = er32(COLC);
4208
4209 if ((hw->mac.type != e1000_82574) &&
4210 (hw->mac.type != e1000_82583))
4211 adapter->stats.tncrs += er32(TNCRS);
4212 }
4213 adapter->stats.colc += hw->mac.collision_delta;
4214 }
4215
4216 adapter->stats.xonrxc += er32(XONRXC);
4217 adapter->stats.xontxc += er32(XONTXC);
4218 adapter->stats.xoffrxc += er32(XOFFRXC);
4219 adapter->stats.xofftxc += er32(XOFFTXC);
4220 adapter->stats.gptc += er32(GPTC);
4221 adapter->stats.gotc += er32(GOTCL);
4222 er32(GOTCH); /* Clear gotc */
4223 adapter->stats.rnbc += er32(RNBC);
4224 adapter->stats.ruc += er32(RUC);
4225
4226 adapter->stats.mptc += er32(MPTC);
4227 adapter->stats.bptc += er32(BPTC);
4228
4229 /* used for adaptive IFS */
4230
4231 hw->mac.tx_packet_delta = er32(TPT);
4232 adapter->stats.tpt += hw->mac.tx_packet_delta;
4233
4234 adapter->stats.algnerrc += er32(ALGNERRC);
4235 adapter->stats.rxerrc += er32(RXERRC);
4236 adapter->stats.cexterr += er32(CEXTERR);
4237 adapter->stats.tsctc += er32(TSCTC);
4238 adapter->stats.tsctfc += er32(TSCTFC);
4239
4240 /* Fill out the OS statistics structure */
4241 netdev->stats.multicast = adapter->stats.mprc;
4242 netdev->stats.collisions = adapter->stats.colc;
4243
4244 /* Rx Errors */
4245
4246 /*
4247 * RLEC on some newer hardware can be incorrect so build
4248 * our own version based on RUC and ROC
4249 */
4250 netdev->stats.rx_errors = adapter->stats.rxerrc +
4251 adapter->stats.crcerrs + adapter->stats.algnerrc +
4252 adapter->stats.ruc + adapter->stats.roc +
4253 adapter->stats.cexterr;
4254 netdev->stats.rx_length_errors = adapter->stats.ruc +
4255 adapter->stats.roc;
4256 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4257 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4258 netdev->stats.rx_missed_errors = adapter->stats.mpc;
4259
4260 /* Tx Errors */
4261 netdev->stats.tx_errors = adapter->stats.ecol +
4262 adapter->stats.latecol;
4263 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4264 netdev->stats.tx_window_errors = adapter->stats.latecol;
4265 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
4266
4267 /* Tx Dropped needs to be maintained elsewhere */
4268
4269 /* Management Stats */
4270 adapter->stats.mgptc += er32(MGTPTC);
4271 adapter->stats.mgprc += er32(MGTPRC);
4272 adapter->stats.mgpdc += er32(MGTPDC);
4273 }
4274
4275 /**
4276 * e1000_phy_read_status - Update the PHY register status snapshot
4277 * @adapter: board private structure
4278 **/
4279 static void e1000_phy_read_status(struct e1000_adapter *adapter)
4280 {
4281 struct e1000_hw *hw = &adapter->hw;
4282 struct e1000_phy_regs *phy = &adapter->phy_regs;
4283
4284 if ((er32(STATUS) & E1000_STATUS_LU) &&
4285 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4286 int ret_val;
4287
4288 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
4289 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
4290 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
4291 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
4292 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
4293 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
4294 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
4295 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
4296 if (ret_val)
4297 e_warn("Error reading PHY register\n");
4298 } else {
4299 /*
4300 * Do not read PHY registers if link is not up
4301 * Set values to typical power-on defaults
4302 */
4303 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
4304 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
4305 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
4306 BMSR_ERCAP);
4307 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
4308 ADVERTISE_ALL | ADVERTISE_CSMA);
4309 phy->lpa = 0;
4310 phy->expansion = EXPANSION_ENABLENPAGE;
4311 phy->ctrl1000 = ADVERTISE_1000FULL;
4312 phy->stat1000 = 0;
4313 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
4314 }
4315 }
4316
4317 static void e1000_print_link_info(struct e1000_adapter *adapter)
4318 {
4319 struct e1000_hw *hw = &adapter->hw;
4320 u32 ctrl = er32(CTRL);
4321
4322 /* Link status message must follow this format for user tools */
4323 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4324 adapter->netdev->name,
4325 adapter->link_speed,
4326 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
4327 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
4328 (ctrl & E1000_CTRL_RFCE) ? "Rx" :
4329 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
4330 }
4331
4332 static bool e1000e_has_link(struct e1000_adapter *adapter)
4333 {
4334 struct e1000_hw *hw = &adapter->hw;
4335 bool link_active = false;
4336 s32 ret_val = 0;
4337
4338 /*
4339 * get_link_status is set on an LSC (link status) interrupt or an
4340 * Rx sequence error interrupt; check_for_link clears it once link
4341 * is established, so link_active stays false until that happens
4342 * (for copper adapters ONLY)
4343 */
4344 switch (hw->phy.media_type) {
4345 case e1000_media_type_copper:
4346 if (hw->mac.get_link_status) {
4347 ret_val = hw->mac.ops.check_for_link(hw);
4348 link_active = !hw->mac.get_link_status;
4349 } else {
4350 link_active = true;
4351 }
4352 break;
4353 case e1000_media_type_fiber:
4354 ret_val = hw->mac.ops.check_for_link(hw);
4355 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
4356 break;
4357 case e1000_media_type_internal_serdes:
4358 ret_val = hw->mac.ops.check_for_link(hw);
4359 link_active = adapter->hw.mac.serdes_has_link;
4360 break;
4361 default:
4362 case e1000_media_type_unknown:
4363 break;
4364 }
4365
4366 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4367 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4368 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
4369 e_info("Gigabit has been disabled, downgrading speed\n");
4370 }
4371
4372 return link_active;
4373 }
4374
4375 static void e1000e_enable_receives(struct e1000_adapter *adapter)
4376 {
4377 /* make sure the receive unit is started */
4378 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4379 (adapter->flags & FLAG_RX_RESTART_NOW)) {
4380 struct e1000_hw *hw = &adapter->hw;
4381 u32 rctl = er32(RCTL);
4382 ew32(RCTL, rctl | E1000_RCTL_EN);
4383 adapter->flags &= ~FLAG_RX_RESTART_NOW;
4384 }
4385 }
4386
4387 static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4388 {
4389 struct e1000_hw *hw = &adapter->hw;
4390
4391 /*
4392 * With 82574 controllers, the PHY needs to be checked periodically
4393 * for a hung state, and reset if two successive calls report a hang
4394 */
4395 if (e1000_check_phy_82574(hw))
4396 adapter->phy_hang_count++;
4397 else
4398 adapter->phy_hang_count = 0;
4399
4400 if (adapter->phy_hang_count > 1) {
4401 adapter->phy_hang_count = 0;
4402 schedule_work(&adapter->reset_task);
4403 }
4404 }
4405
4406 /**
4407 * e1000_watchdog - Timer Call-back
4408 * @data: pointer to adapter cast into an unsigned long
4409 **/
4410 static void e1000_watchdog(unsigned long data)
4411 {
4412 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
4413
4414 /* Do the rest outside of interrupt context */
4415 schedule_work(&adapter->watchdog_task);
4416
4417 /* TODO: make this use queue_delayed_work() */
4418 }
4419
4420 static void e1000_watchdog_task(struct work_struct *work)
4421 {
4422 struct e1000_adapter *adapter = container_of(work,
4423 struct e1000_adapter, watchdog_task);
4424 struct net_device *netdev = adapter->netdev;
4425 struct e1000_mac_info *mac = &adapter->hw.mac;
4426 struct e1000_phy_info *phy = &adapter->hw.phy;
4427 struct e1000_ring *tx_ring = adapter->tx_ring;
4428 struct e1000_hw *hw = &adapter->hw;
4429 u32 link, tctl;
4430
4431 if (test_bit(__E1000_DOWN, &adapter->state))
4432 return;
4433
4434 link = e1000e_has_link(adapter);
4435 if ((netif_carrier_ok(netdev)) && link) {
4436 /* Cancel scheduled suspend requests. */
4437 pm_runtime_resume(netdev->dev.parent);
4438
4439 e1000e_enable_receives(adapter);
4440 goto link_up;
4441 }
4442
4443 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
4444 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
4445 e1000_update_mng_vlan(adapter);
4446
4447 if (link) {
4448 if (!netif_carrier_ok(netdev)) {
4449 bool txb2b = true;
4450
4451 /* Cancel scheduled suspend requests. */
4452 pm_runtime_resume(netdev->dev.parent);
4453
4454 /* update snapshot of PHY registers on LSC */
4455 e1000_phy_read_status(adapter);
4456 mac->ops.get_link_up_info(&adapter->hw,
4457 &adapter->link_speed,
4458 &adapter->link_duplex);
4459 e1000_print_link_info(adapter);
4460 /*
4461 * On supported PHYs, check for duplex mismatch only
4462 * if link has autonegotiated at 10/100 half
4463 */
4464 if ((hw->phy.type == e1000_phy_igp_3 ||
4465 hw->phy.type == e1000_phy_bm) &&
4466 hw->mac.autoneg &&
4467 (adapter->link_speed == SPEED_10 ||
4468 adapter->link_speed == SPEED_100) &&
4469 (adapter->link_duplex == HALF_DUPLEX)) {
4470 u16 autoneg_exp;
4471
4472 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
4473
4474 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
4475 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
4476 }
4477
4478 /* adjust timeout factor according to speed/duplex */
4479 adapter->tx_timeout_factor = 1;
4480 switch (adapter->link_speed) {
4481 case SPEED_10:
4482 txb2b = false;
4483 adapter->tx_timeout_factor = 16;
4484 break;
4485 case SPEED_100:
4486 txb2b = false;
4487 adapter->tx_timeout_factor = 10;
4488 break;
4489 }
4490
4491 /*
4492 * workaround: re-program speed mode bit after
4493 * link-up event
4494 */
4495 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4496 !txb2b) {
4497 u32 tarc0;
4498 tarc0 = er32(TARC(0));
4499 tarc0 &= ~SPEED_MODE_BIT;
4500 ew32(TARC(0), tarc0);
4501 }
4502
4503 /*
4504 * disable TSO for pcie and 10/100 speeds, to avoid
4505 * some hardware issues
4506 */
4507 if (!(adapter->flags & FLAG_TSO_FORCE)) {
4508 switch (adapter->link_speed) {
4509 case SPEED_10:
4510 case SPEED_100:
4511 e_info("10/100 speed: disabling TSO\n");
4512 netdev->features &= ~NETIF_F_TSO;
4513 netdev->features &= ~NETIF_F_TSO6;
4514 break;
4515 case SPEED_1000:
4516 netdev->features |= NETIF_F_TSO;
4517 netdev->features |= NETIF_F_TSO6;
4518 break;
4519 default:
4520 /* oops */
4521 break;
4522 }
4523 }
4524
4525 /*
4526 * enable transmits in the hardware, need to do this
4527 * after setting TARC(0)
4528 */
4529 tctl = er32(TCTL);
4530 tctl |= E1000_TCTL_EN;
4531 ew32(TCTL, tctl);
4532
4533 /*
4534 * Perform any post-link-up configuration before
4535 * reporting link up.
4536 */
4537 if (phy->ops.cfg_on_link_up)
4538 phy->ops.cfg_on_link_up(hw);
4539
4540 netif_carrier_on(netdev);
4541
4542 if (!test_bit(__E1000_DOWN, &adapter->state))
4543 mod_timer(&adapter->phy_info_timer,
4544 round_jiffies(jiffies + 2 * HZ));
4545 }
4546 } else {
4547 if (netif_carrier_ok(netdev)) {
4548 adapter->link_speed = 0;
4549 adapter->link_duplex = 0;
4550 /* Link status message must follow this format */
4551 printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
4552 adapter->netdev->name);
4553 netif_carrier_off(netdev);
4554 if (!test_bit(__E1000_DOWN, &adapter->state))
4555 mod_timer(&adapter->phy_info_timer,
4556 round_jiffies(jiffies + 2 * HZ));
4557
4558 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
4559 schedule_work(&adapter->reset_task);
4560 else
4561 pm_schedule_suspend(netdev->dev.parent,
4562 LINK_TIMEOUT);
4563 }
4564 }
4565
4566 link_up:
4567 spin_lock(&adapter->stats64_lock);
4568 e1000e_update_stats(adapter);
4569
4570 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4571 adapter->tpt_old = adapter->stats.tpt;
4572 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
4573 adapter->colc_old = adapter->stats.colc;
4574
4575 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
4576 adapter->gorc_old = adapter->stats.gorc;
4577 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
4578 adapter->gotc_old = adapter->stats.gotc;
4579 spin_unlock(&adapter->stats64_lock);
4580
4581 e1000e_update_adaptive(&adapter->hw);
4582
4583 if (!netif_carrier_ok(netdev) &&
4584 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
4585 /*
4586 * We've lost link, so the controller stops DMA,
4587 * but we've got queued Tx work that's never going
4588 * to get done, so reset controller to flush Tx.
4589 * (Do the reset outside of interrupt context).
4590 */
4591 schedule_work(&adapter->reset_task);
4592 /* return immediately since reset is imminent */
4593 return;
4594 }
4595
4596 /* Simple mode for Interrupt Throttle Rate (ITR) */
4597 if (adapter->itr_setting == 4) {
4598 /*
4599 * Symmetric Tx/Rx gets a reduced ITR=2000;
4600 * Total asymmetrical Tx or Rx gets ITR=8000;
4601 * everyone else is between 2000-8000.
4602 */
4603 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
4604 u32 dif = (adapter->gotc > adapter->gorc ?
4605 adapter->gotc - adapter->gorc :
4606 adapter->gorc - adapter->gotc) / 10000;
4607 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
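/*
 * e.g. gotc == gorc (symmetric) gives dif == 0 -> itr = 2000;
 * traffic in only one direction gives dif == goc -> itr = 8000.
 */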
4608
4609 ew32(ITR, 1000000000 / (itr * 256));
4610 }
4611
4612 /* Cause software interrupt to ensure Rx ring is cleaned */
4613 if (adapter->msix_entries)
4614 ew32(ICS, adapter->rx_ring->ims_val);
4615 else
4616 ew32(ICS, E1000_ICS_RXDMT0);
4617
4618 /* flush pending descriptors to memory before detecting Tx hang */
4619 e1000e_flush_descriptors(adapter);
4620
4621 /* Force detection of hung controller every watchdog period */
4622 adapter->detect_tx_hung = true;
4623
4624 /*
4625 * With 82571 controllers, LAA may be overwritten due to controller
4626 * reset from the other port. Set the appropriate LAA in RAR[0]
4627 */
4628 if (e1000e_get_laa_state_82571(hw))
4629 e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
4630
4631 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
4632 e1000e_check_82574_phy_workaround(adapter);
4633
4634 /* Reset the timer */
4635 if (!test_bit(__E1000_DOWN, &adapter->state))
4636 mod_timer(&adapter->watchdog_timer,
4637 round_jiffies(jiffies + 2 * HZ));
4638 }
4639
4640 #define E1000_TX_FLAGS_CSUM 0x00000001
4641 #define E1000_TX_FLAGS_VLAN 0x00000002
4642 #define E1000_TX_FLAGS_TSO 0x00000004
4643 #define E1000_TX_FLAGS_IPV4 0x00000008
4644 #define E1000_TX_FLAGS_NO_FCS 0x00000010
4645 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
4646 #define E1000_TX_FLAGS_VLAN_SHIFT 16
4647
4648 static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
4649 {
4650 struct e1000_context_desc *context_desc;
4651 struct e1000_buffer *buffer_info;
4652 unsigned int i;
4653 u32 cmd_length = 0;
4654 u16 ipcse = 0, tucse, mss;
4655 u8 ipcss, ipcso, tucss, tucso, hdr_len;
4656
4657 if (!skb_is_gso(skb))
4658 return 0;
4659
4660 if (skb_header_cloned(skb)) {
4661 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4662
4663 if (err)
4664 return err;
4665 }
4666
4667 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4668 mss = skb_shinfo(skb)->gso_size;
4669 if (skb->protocol == htons(ETH_P_IP)) {
4670 struct iphdr *iph = ip_hdr(skb);
4671 iph->tot_len = 0;
4672 iph->check = 0;
4673 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
4674 0, IPPROTO_TCP, 0);
4675 cmd_length = E1000_TXD_CMD_IP;
4676 ipcse = skb_transport_offset(skb) - 1;
4677 } else if (skb_is_gso_v6(skb)) {
4678 ipv6_hdr(skb)->payload_len = 0;
4679 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4680 &ipv6_hdr(skb)->daddr,
4681 0, IPPROTO_TCP, 0);
4682 ipcse = 0;
4683 }
4684 ipcss = skb_network_offset(skb);
4685 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
4686 tucss = skb_transport_offset(skb);
4687 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
4688 tucse = 0;
4689
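/* the low bits of cmd_and_length carry PAYLEN, the TCP payload size (skb->len - hdr_len) */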
4690 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
4691 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
4692
4693 i = tx_ring->next_to_use;
4694 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4695 buffer_info = &tx_ring->buffer_info[i];
4696
4697 context_desc->lower_setup.ip_fields.ipcss = ipcss;
4698 context_desc->lower_setup.ip_fields.ipcso = ipcso;
4699 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
4700 context_desc->upper_setup.tcp_fields.tucss = tucss;
4701 context_desc->upper_setup.tcp_fields.tucso = tucso;
4702 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
4703 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
4704 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
4705 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
4706
4707 buffer_info->time_stamp = jiffies;
4708 buffer_info->next_to_watch = i;
4709
4710 i++;
4711 if (i == tx_ring->count)
4712 i = 0;
4713 tx_ring->next_to_use = i;
4714
4715 return 1;
4716 }
4717
4718 static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
4719 {
4720 struct e1000_adapter *adapter = tx_ring->adapter;
4721 struct e1000_context_desc *context_desc;
4722 struct e1000_buffer *buffer_info;
4723 unsigned int i;
4724 u8 css;
4725 u32 cmd_len = E1000_TXD_CMD_DEXT;
4726 __be16 protocol;
4727
4728 if (skb->ip_summed != CHECKSUM_PARTIAL)
4729 return false;
4730
4731 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
4732 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
4733 else
4734 protocol = skb->protocol;
4735
4736 switch (protocol) {
4737 case cpu_to_be16(ETH_P_IP):
4738 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4739 cmd_len |= E1000_TXD_CMD_TCP;
4740 break;
4741 case cpu_to_be16(ETH_P_IPV6):
4742 /* XXX not handling all IPV6 headers */
4743 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
4744 cmd_len |= E1000_TXD_CMD_TCP;
4745 break;
4746 default:
4747 if (unlikely(net_ratelimit()))
4748 e_warn("checksum_partial proto=%x!\n",
4749 be16_to_cpu(protocol));
4750 break;
4751 }
4752
4753 css = skb_checksum_start_offset(skb);
4754
4755 i = tx_ring->next_to_use;
4756 buffer_info = &tx_ring->buffer_info[i];
4757 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4758
4759 context_desc->lower_setup.ip_config = 0;
4760 context_desc->upper_setup.tcp_fields.tucss = css;
4761 context_desc->upper_setup.tcp_fields.tucso =
4762 css + skb->csum_offset;
4763 context_desc->upper_setup.tcp_fields.tucse = 0;
4764 context_desc->tcp_seg_setup.data = 0;
4765 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
4766
4767 buffer_info->time_stamp = jiffies;
4768 buffer_info->next_to_watch = i;
4769
4770 i++;
4771 if (i == tx_ring->count)
4772 i = 0;
4773 tx_ring->next_to_use = i;
4774
4775 return true;
4776 }
4777
4778 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
4779 unsigned int first, unsigned int max_per_txd,
4780 unsigned int nr_frags)
4781 {
4782 struct e1000_adapter *adapter = tx_ring->adapter;
4783 struct pci_dev *pdev = adapter->pdev;
4784 struct e1000_buffer *buffer_info;
4785 unsigned int len = skb_headlen(skb);
4786 unsigned int offset = 0, size, count = 0, i;
4787 unsigned int f, bytecount, segs;
4788
4789 i = tx_ring->next_to_use;
4790
4791 while (len) {
4792 buffer_info = &tx_ring->buffer_info[i];
4793 size = min(len, max_per_txd);
4794
4795 buffer_info->length = size;
4796 buffer_info->time_stamp = jiffies;
4797 buffer_info->next_to_watch = i;
4798 buffer_info->dma = dma_map_single(&pdev->dev,
4799 skb->data + offset,
4800 size, DMA_TO_DEVICE);
4801 buffer_info->mapped_as_page = false;
4802 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4803 goto dma_error;
4804
4805 len -= size;
4806 offset += size;
4807 count++;
4808
4809 if (len) {
4810 i++;
4811 if (i == tx_ring->count)
4812 i = 0;
4813 }
4814 }
4815
4816 for (f = 0; f < nr_frags; f++) {
4817 const struct skb_frag_struct *frag;
4818
4819 frag = &skb_shinfo(skb)->frags[f];
4820 len = skb_frag_size(frag);
4821 offset = 0;
4822
4823 while (len) {
4824 i++;
4825 if (i == tx_ring->count)
4826 i = 0;
4827
4828 buffer_info = &tx_ring->buffer_info[i];
4829 size = min(len, max_per_txd);
4830
4831 buffer_info->length = size;
4832 buffer_info->time_stamp = jiffies;
4833 buffer_info->next_to_watch = i;
4834 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
4835 offset, size, DMA_TO_DEVICE);
4836 buffer_info->mapped_as_page = true;
4837 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4838 goto dma_error;
4839
4840 len -= size;
4841 offset += size;
4842 count++;
4843 }
4844 }
4845
4846 segs = skb_shinfo(skb)->gso_segs ? : 1;
4847 /* bytes on the wire: skb->len plus one extra copy of the headers for each additional TSO segment */
4848 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
4849
4850 tx_ring->buffer_info[i].skb = skb;
4851 tx_ring->buffer_info[i].segs = segs;
4852 tx_ring->buffer_info[i].bytecount = bytecount;
4853 tx_ring->buffer_info[first].next_to_watch = i;
4854
4855 return count;
4856
4857 dma_error:
4858 dev_err(&pdev->dev, "Tx DMA map failed\n");
4859 buffer_info->dma = 0;
4860 if (count)
4861 count--;
4862
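/* unwind: walk backwards over the buffers already mapped, wrapping at the ring start */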
4863 while (count--) {
4864 if (i == 0)
4865 i += tx_ring->count;
4866 i--;
4867 buffer_info = &tx_ring->buffer_info[i];
4868 e1000_put_txbuf(tx_ring, buffer_info);
4869 }
4870
4871 return 0;
4872 }
4873
4874 static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
4875 {
4876 struct e1000_adapter *adapter = tx_ring->adapter;
4877 struct e1000_tx_desc *tx_desc = NULL;
4878 struct e1000_buffer *buffer_info;
4879 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
4880 unsigned int i;
4881
4882 if (tx_flags & E1000_TX_FLAGS_TSO) {
4883 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
4884 E1000_TXD_CMD_TSE;
4885 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4886
4887 if (tx_flags & E1000_TX_FLAGS_IPV4)
4888 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
4889 }
4890
4891 if (tx_flags & E1000_TX_FLAGS_CSUM) {
4892 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
4893 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4894 }
4895
4896 if (tx_flags & E1000_TX_FLAGS_VLAN) {
4897 txd_lower |= E1000_TXD_CMD_VLE;
4898 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
4899 }
4900
4901 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
4902 txd_lower &= ~(E1000_TXD_CMD_IFCS);
4903
4904 i = tx_ring->next_to_use;
4905
4906 do {
4907 buffer_info = &tx_ring->buffer_info[i];
4908 tx_desc = E1000_TX_DESC(*tx_ring, i);
4909 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4910 tx_desc->lower.data =
4911 cpu_to_le32(txd_lower | buffer_info->length);
4912 tx_desc->upper.data = cpu_to_le32(txd_upper);
4913
4914 i++;
4915 if (i == tx_ring->count)
4916 i = 0;
4917 } while (--count > 0);
4918
4919 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
4920
4921 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
4922 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
4923 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
4924
4925 /*
4926 * Force memory writes to complete before letting h/w
4927 * know there are new descriptors to fetch. (Only
4928 * applicable for weak-ordered memory model archs,
4929 * such as IA-64).
4930 */
4931 wmb();
4932
4933 tx_ring->next_to_use = i;
4934
4935 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
4936 e1000e_update_tdt_wa(tx_ring, i);
4937 else
4938 writel(i, tx_ring->tail);
4939
4940 /*
4941 * we need this if more than one processor can write to our tail
4942 * at a time, it synchronizes IO on IA64/Altix systems
4943 */
4944 mmiowb();
4945 }
4946
4947 #define MINIMUM_DHCP_PACKET_SIZE 282
4948 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
4949 struct sk_buff *skb)
4950 {
4951 struct e1000_hw *hw = &adapter->hw;
4952 u16 length, offset;
4953
4954 if (vlan_tx_tag_present(skb)) {
4955 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
4956 (adapter->hw.mng_cookie.status &
4957 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
4958 return 0;
4959 }
4960
4961 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
4962 return 0;
4963
4964 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
4965 return 0;
4966
4967 {
4968 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); /* skip Ethernet header */
4969 struct udphdr *udp;
4970
4971 if (ip->protocol != IPPROTO_UDP)
4972 return 0;
4973
4974 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
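/*
 * ip->ihl counts 32-bit words, so (ihl << 2) skips the IP header;
 * port 67 is the BOOTP/DHCP server port, and the DHCP payload starts
 * 8 bytes (the UDP header) past udp.
 */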
4975 if (ntohs(udp->dest) != 67)
4976 return 0;
4977
4978 offset = (u8 *)udp + 8 - skb->data;
4979 length = skb->len - offset;
4980 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
4981 }
4982
4983 return 0;
4984 }
4985
4986 static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
4987 {
4988 struct e1000_adapter *adapter = tx_ring->adapter;
4989
4990 netif_stop_queue(adapter->netdev);
4991 /*
4992 * Herbert's original patch had:
4993 * smp_mb__after_netif_stop_queue();
4994 * but since that doesn't exist yet, just open code it.
4995 */
4996 smp_mb();
4997
4998 /*
4999 * We need to check again in a case another CPU has just
5000 * made room available.
5001 */
5002 if (e1000_desc_unused(tx_ring) < size)
5003 return -EBUSY;
5004
5005 /* A reprieve! */
5006 netif_start_queue(adapter->netdev);
5007 ++adapter->restart_queue;
5008 return 0;
5009 }
5010
5011 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5012 {
5013 BUG_ON(size > tx_ring->count);
5014
5015 if (e1000_desc_unused(tx_ring) >= size)
5016 return 0;
5017 return __e1000_maybe_stop_tx(tx_ring, size);
5018 }
5019
5020 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5021 struct net_device *netdev)
5022 {
5023 struct e1000_adapter *adapter = netdev_priv(netdev);
5024 struct e1000_ring *tx_ring = adapter->tx_ring;
5025 unsigned int first;
5026 unsigned int tx_flags = 0;
5027 unsigned int len = skb_headlen(skb);
5028 unsigned int nr_frags;
5029 unsigned int mss;
5030 int count = 0;
5031 int tso;
5032 unsigned int f;
5033
5034 if (test_bit(__E1000_DOWN, &adapter->state)) {
5035 dev_kfree_skb_any(skb);
5036 return NETDEV_TX_OK;
5037 }
5038
5039 if (skb->len <= 0) {
5040 dev_kfree_skb_any(skb);
5041 return NETDEV_TX_OK;
5042 }
5043
5044 mss = skb_shinfo(skb)->gso_size;
5045 if (mss) {
5046 u8 hdr_len;
5047
5048 /*
5049 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
5050 * points to just header, pull a few bytes of payload from
5051 * frags into skb->data
5052 */
5053 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5054 /*
5055 * we also do this workaround for ES2LAN, where it is unnecessary;
5056 * avoiding it there could save a lot of cycles
5057 */
5058 if (skb->data_len && (hdr_len == len)) {
5059 unsigned int pull_size;
5060
5061 pull_size = min_t(unsigned int, 4, skb->data_len);
5062 if (!__pskb_pull_tail(skb, pull_size)) {
5063 e_err("__pskb_pull_tail failed.\n");
5064 dev_kfree_skb_any(skb);
5065 return NETDEV_TX_OK;
5066 }
5067 len = skb_headlen(skb);
5068 }
5069 }
5070
5071 /* reserve a descriptor for the offload context */
5072 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
5073 count++;
5074 count++;
5075
5076 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
5077
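/* each frag may likewise need several descriptors of at most tx_fifo_limit bytes each */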
5078 nr_frags = skb_shinfo(skb)->nr_frags;
5079 for (f = 0; f < nr_frags; f++)
5080 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
5081 adapter->tx_fifo_limit);
5082
5083 if (adapter->hw.mac.tx_pkt_filtering)
5084 e1000_transfer_dhcp_info(adapter, skb);
5085
5086 /*
5087 * need: count + 2 desc gap to keep tail from touching
5088 * head, otherwise try next time
5089 */
5090 if (e1000_maybe_stop_tx(tx_ring, count + 2))
5091 return NETDEV_TX_BUSY;
5092
5093 if (vlan_tx_tag_present(skb)) {
5094 tx_flags |= E1000_TX_FLAGS_VLAN;
5095 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
5096 }
5097
5098 first = tx_ring->next_to_use;
5099
5100 tso = e1000_tso(tx_ring, skb);
5101 if (tso < 0) {
5102 dev_kfree_skb_any(skb);
5103 return NETDEV_TX_OK;
5104 }
5105
5106 if (tso)
5107 tx_flags |= E1000_TX_FLAGS_TSO;
5108 else if (e1000_tx_csum(tx_ring, skb))
5109 tx_flags |= E1000_TX_FLAGS_CSUM;
5110
5111 /*
5112 * The old method assumed an IPv4 packet by default if TSO was
5113 * enabled. 82571 hardware supports TSO for IPv6 as well, so we
5114 * can no longer assume; we must check the protocol.
5115 */
5116 if (skb->protocol == htons(ETH_P_IP))
5117 tx_flags |= E1000_TX_FLAGS_IPV4;
5118
5119 if (unlikely(skb->no_fcs))
5120 tx_flags |= E1000_TX_FLAGS_NO_FCS;
5121
5122 /* if count is 0 then a DMA mapping error has occurred */
5123 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5124 nr_frags);
5125 if (count) {
5126 netdev_sent_queue(netdev, skb->len);
5127 e1000_tx_queue(tx_ring, tx_flags, count);
5128 /* Make sure there is space in the ring for the next send. */
5129 e1000_maybe_stop_tx(tx_ring,
5130 (MAX_SKB_FRAGS *
5131 DIV_ROUND_UP(PAGE_SIZE,
5132 adapter->tx_fifo_limit) + 2));
5133 } else {
5134 dev_kfree_skb_any(skb);
5135 tx_ring->buffer_info[first].time_stamp = 0;
5136 tx_ring->next_to_use = first;
5137 }
5138
5139 return NETDEV_TX_OK;
5140 }
5141
5142 /**
5143 * e1000_tx_timeout - Respond to a Tx Hang
5144 * @netdev: network interface device structure
5145 **/
5146 static void e1000_tx_timeout(struct net_device *netdev)
5147 {
5148 struct e1000_adapter *adapter = netdev_priv(netdev);
5149
5150 /* Do the reset outside of interrupt context */
5151 adapter->tx_timeout_count++;
5152 schedule_work(&adapter->reset_task);
5153 }
5154
5155 static void e1000_reset_task(struct work_struct *work)
5156 {
5157 struct e1000_adapter *adapter;
5158 adapter = container_of(work, struct e1000_adapter, reset_task);
5159
5160 /* don't run the task if already down */
5161 if (test_bit(__E1000_DOWN, &adapter->state))
5162 return;
5163
5164 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
5165 (adapter->flags & FLAG_RX_RESTART_NOW))) {
5166 e1000e_dump(adapter);
5167 e_err("Reset adapter\n");
5168 }
5169 e1000e_reinit_locked(adapter);
5170 }
5171
5172 /**
5173 * e1000_get_stats64 - Get System Network Statistics
5174 * @netdev: network interface device structure
5175 * @stats: rtnl_link_stats64 pointer
5176 *
5177 * Returns the address of the device statistics structure.
5178 **/
5179 struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5180 struct rtnl_link_stats64 *stats)
5181 {
5182 struct e1000_adapter *adapter = netdev_priv(netdev);
5183
5184 memset(stats, 0, sizeof(struct rtnl_link_stats64));
5185 spin_lock(&adapter->stats64_lock);
5186 e1000e_update_stats(adapter);
5187 /* Fill out the OS statistics structure */
5188 stats->rx_bytes = adapter->stats.gorc;
5189 stats->rx_packets = adapter->stats.gprc;
5190 stats->tx_bytes = adapter->stats.gotc;
5191 stats->tx_packets = adapter->stats.gptc;
5192 stats->multicast = adapter->stats.mprc;
5193 stats->collisions = adapter->stats.colc;
5194
5195 /* Rx Errors */
5196
5197 /*
5198 * RLEC on some newer hardware can be incorrect so build
5199 * our own version based on RUC and ROC
5200 */
5201 stats->rx_errors = adapter->stats.rxerrc +
5202 adapter->stats.crcerrs + adapter->stats.algnerrc +
5203 adapter->stats.ruc + adapter->stats.roc +
5204 adapter->stats.cexterr;
5205 stats->rx_length_errors = adapter->stats.ruc +
5206 adapter->stats.roc;
5207 stats->rx_crc_errors = adapter->stats.crcerrs;
5208 stats->rx_frame_errors = adapter->stats.algnerrc;
5209 stats->rx_missed_errors = adapter->stats.mpc;
5210
5211 /* Tx Errors */
5212 stats->tx_errors = adapter->stats.ecol +
5213 adapter->stats.latecol;
5214 stats->tx_aborted_errors = adapter->stats.ecol;
5215 stats->tx_window_errors = adapter->stats.latecol;
5216 stats->tx_carrier_errors = adapter->stats.tncrs;
5217
5218 /* Tx Dropped needs to be maintained elsewhere */
5219
5220 spin_unlock(&adapter->stats64_lock);
5221 return stats;
5222 }
5223
5224 /**
5225 * e1000_change_mtu - Change the Maximum Transfer Unit
5226 * @netdev: network interface device structure
5227 * @new_mtu: new value for maximum frame size
5228 *
5229 * Returns 0 on success, negative on failure
5230 **/
5231 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5232 {
5233 struct e1000_adapter *adapter = netdev_priv(netdev);
5234 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
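/* max_frame = MTU + 14-byte Ethernet header + 4-byte FCS */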
5235
5236 /* Jumbo frame support */
5237 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
5238 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
5239 e_err("Jumbo Frames not supported.\n");
5240 return -EINVAL;
5241 }
5242
5243 /* Supported frame sizes */
5244 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
5245 (max_frame > adapter->max_hw_frame_size)) {
5246 e_err("Unsupported MTU setting\n");
5247 return -EINVAL;
5248 }
5249
5250 /* Jumbo frame workaround on 82579 requires CRC be stripped */
5251 if ((adapter->hw.mac.type == e1000_pch2lan) &&
5252 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5253 (new_mtu > ETH_DATA_LEN)) {
5254 e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n");
5255 return -EINVAL;
5256 }
5257
5258 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
5259 usleep_range(1000, 2000);
5260 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
5261 adapter->max_frame_size = max_frame;
5262 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5263 netdev->mtu = new_mtu;
5264 if (netif_running(netdev))
5265 e1000e_down(adapter);
5266
5267 /*
5268 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
5269 * means we reserve 2 more; this pushes us to allocate from the next
5270 * larger slab size.
5271 * i.e. RXBUFFER_2048 --> size-4096 slab
5272 * However with the new *_jumbo_rx* routines, jumbo receives will use
5273 * fragmented skbs
5274 */
5275
5276 if (max_frame <= 2048)
5277 adapter->rx_buffer_len = 2048;
5278 else
5279 adapter->rx_buffer_len = 4096;
5280
5281 /* adjust allocation if LPE protects us, and we aren't using SBP */
5282 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
5283 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
5284 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
5285 + ETH_FCS_LEN;
5286
5287 if (netif_running(netdev))
5288 e1000e_up(adapter);
5289 else
5290 e1000e_reset(adapter);
5291
5292 clear_bit(__E1000_RESETTING, &adapter->state);
5293
5294 return 0;
5295 }
5296
5297 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
5298 int cmd)
5299 {
5300 struct e1000_adapter *adapter = netdev_priv(netdev);
5301 struct mii_ioctl_data *data = if_mii(ifr);
5302
5303 if (adapter->hw.phy.media_type != e1000_media_type_copper)
5304 return -EOPNOTSUPP;
5305
5306 switch (cmd) {
5307 case SIOCGMIIPHY:
5308 data->phy_id = adapter->hw.phy.addr;
5309 break;
5310 case SIOCGMIIREG:
5311 e1000_phy_read_status(adapter);
5312
5313 switch (data->reg_num & 0x1F) {
5314 case MII_BMCR:
5315 data->val_out = adapter->phy_regs.bmcr;
5316 break;
5317 case MII_BMSR:
5318 data->val_out = adapter->phy_regs.bmsr;
5319 break;
5320 case MII_PHYSID1:
5321 data->val_out = (adapter->hw.phy.id >> 16);
5322 break;
5323 case MII_PHYSID2:
5324 data->val_out = (adapter->hw.phy.id & 0xFFFF);
5325 break;
5326 case MII_ADVERTISE:
5327 data->val_out = adapter->phy_regs.advertise;
5328 break;
5329 case MII_LPA:
5330 data->val_out = adapter->phy_regs.lpa;
5331 break;
5332 case MII_EXPANSION:
5333 data->val_out = adapter->phy_regs.expansion;
5334 break;
5335 case MII_CTRL1000:
5336 data->val_out = adapter->phy_regs.ctrl1000;
5337 break;
5338 case MII_STAT1000:
5339 data->val_out = adapter->phy_regs.stat1000;
5340 break;
5341 case MII_ESTATUS:
5342 data->val_out = adapter->phy_regs.estatus;
5343 break;
5344 default:
5345 return -EIO;
5346 }
5347 break;
5348 case SIOCSMIIREG:
5349 default:
5350 return -EOPNOTSUPP;
5351 }
5352 return 0;
5353 }
5354
5355 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5356 {
5357 switch (cmd) {
5358 case SIOCGMIIPHY:
5359 case SIOCGMIIREG:
5360 case SIOCSMIIREG:
5361 return e1000_mii_ioctl(netdev, ifr, cmd);
5362 default:
5363 return -EOPNOTSUPP;
5364 }
5365 }
5366
5367 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5368 {
5369 struct e1000_hw *hw = &adapter->hw;
5370 u32 i, mac_reg;
5371 u16 phy_reg, wuc_enable;
5372 int retval = 0;
5373
5374 /* copy MAC RARs to PHY RARs */
5375 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
5376
5377 retval = hw->phy.ops.acquire(hw);
5378 if (retval) {
5379 e_err("Could not acquire PHY\n");
5380 return retval;
5381 }
5382
5383 	/* Enable access to wakeup registers and set page to BM_WUC_PAGE */
5384 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5385 if (retval)
5386 goto release;
5387
5388 /* copy MAC MTA to PHY MTA - only needed for pchlan */
5389 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5390 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
5391 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
5392 (u16)(mac_reg & 0xFFFF));
5393 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
5394 (u16)((mac_reg >> 16) & 0xFFFF));
5395 }
5396
5397 /* configure PHY Rx Control register */
5398 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
5399 mac_reg = er32(RCTL);
5400 if (mac_reg & E1000_RCTL_UPE)
5401 phy_reg |= BM_RCTL_UPE;
5402 if (mac_reg & E1000_RCTL_MPE)
5403 phy_reg |= BM_RCTL_MPE;
5404 phy_reg &= ~(BM_RCTL_MO_MASK);
5405 if (mac_reg & E1000_RCTL_MO_3)
5406 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5407 << BM_RCTL_MO_SHIFT);
5408 if (mac_reg & E1000_RCTL_BAM)
5409 phy_reg |= BM_RCTL_BAM;
5410 if (mac_reg & E1000_RCTL_PMCF)
5411 phy_reg |= BM_RCTL_PMCF;
5412 mac_reg = er32(CTRL);
5413 if (mac_reg & E1000_CTRL_RFCE)
5414 phy_reg |= BM_RCTL_RFCE;
5415 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
5416
5417 /* enable PHY wakeup in MAC register */
5418 ew32(WUFC, wufc);
5419 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5420
5421 /* configure and enable PHY wakeup in PHY registers */
5422 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
5423 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
5424
5425 /* activate PHY wakeup */
5426 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5427 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5428 if (retval)
5429 e_err("Could not set PHY Host Wakeup bit\n");
5430 release:
5431 hw->phy.ops.release(hw);
5432
5433 return retval;
5434 }
5435
5436 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5437 bool runtime)
5438 {
5439 struct net_device *netdev = pci_get_drvdata(pdev);
5440 struct e1000_adapter *adapter = netdev_priv(netdev);
5441 struct e1000_hw *hw = &adapter->hw;
5442 u32 ctrl, ctrl_ext, rctl, status;
5443 /* Runtime suspend should only enable wakeup for link changes */
5444 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
5445 int retval = 0;
5446
5447 netif_device_detach(netdev);
5448
5449 if (netif_running(netdev)) {
5450 int count = E1000_CHECK_RESET_COUNT;
5451
5452 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
5453 usleep_range(10000, 20000);
5454
5455 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
5456 e1000e_down(adapter);
5457 e1000_free_irq(adapter);
5458 }
5459 e1000e_reset_interrupt_capability(adapter);
5460
5461 retval = pci_save_state(pdev);
5462 if (retval)
5463 return retval;
5464
5465 status = er32(STATUS);
5466 if (status & E1000_STATUS_LU)
5467 wufc &= ~E1000_WUFC_LNKC;
5468
5469 if (wufc) {
5470 e1000_setup_rctl(adapter);
5471 e1000e_set_rx_mode(netdev);
5472
5473 /* turn on all-multi mode if wake on multicast is enabled */
5474 if (wufc & E1000_WUFC_MC) {
5475 rctl = er32(RCTL);
5476 rctl |= E1000_RCTL_MPE;
5477 ew32(RCTL, rctl);
5478 }
5479
5480 ctrl = er32(CTRL);
5481 /* advertise wake from D3Cold */
5482 #define E1000_CTRL_ADVD3WUC 0x00100000
5483 /* phy power management enable */
5484 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5485 ctrl |= E1000_CTRL_ADVD3WUC;
5486 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
5487 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
5488 ew32(CTRL, ctrl);
5489
5490 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
5491 adapter->hw.phy.media_type ==
5492 e1000_media_type_internal_serdes) {
5493 /* keep the laser running in D3 */
5494 ctrl_ext = er32(CTRL_EXT);
5495 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
5496 ew32(CTRL_EXT, ctrl_ext);
5497 }
5498
5499 if (adapter->flags & FLAG_IS_ICH)
5500 e1000_suspend_workarounds_ich8lan(&adapter->hw);
5501
5502 /* Allow time for pending master requests to run */
5503 e1000e_disable_pcie_master(&adapter->hw);
5504
5505 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5506 /* enable wakeup by the PHY */
5507 retval = e1000_init_phy_wakeup(adapter, wufc);
5508 if (retval)
5509 return retval;
5510 } else {
5511 /* enable wakeup by the MAC */
5512 ew32(WUFC, wufc);
5513 ew32(WUC, E1000_WUC_PME_EN);
5514 }
5515 } else {
5516 ew32(WUC, 0);
5517 ew32(WUFC, 0);
5518 }
5519
5520 *enable_wake = !!wufc;
5521
5522 /* make sure adapter isn't asleep if manageability is enabled */
5523 if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
5524 (hw->mac.ops.check_mng_mode(hw)))
5525 *enable_wake = true;
5526
5527 if (adapter->hw.phy.type == e1000_phy_igp_3)
5528 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
5529
5530 /*
5531 * Release control of h/w to f/w. If f/w is AMT enabled, this
5532 * would have already happened in close and is redundant.
5533 */
5534 e1000e_release_hw_control(adapter);
5535
5536 pci_clear_master(pdev);
5537
5538 return 0;
5539 }
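/*
 * Illustrative only, not part of this driver: adapter->wol, consumed as
 * wufc above, is normally configured from userspace with the ethtool
 * ETHTOOL_SWOL command (the "ethtool -s eth0 wol g" path). A minimal C
 * sketch (requires CAP_NET_ADMIN), assuming a hypothetical interface name
 * "eth0" and an already-open AF_INET datagram socket fd:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *
 *	static int enable_magic_packet_wake(int fd)
 *	{
 *		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_SWOL };
 *		struct ifreq ifr;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		wol.wolopts = WAKE_MAGIC;	// maps to E1000_WUFC_MAG
 *		ifr.ifr_data = (char *)&wol;
 *		return ioctl(fd, SIOCETHTOOL, &ifr);
 *	}
 */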
5540
5541 static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
5542 {
5543 if (sleep && wake) {
5544 pci_prepare_to_sleep(pdev);
5545 return;
5546 }
5547
5548 pci_wake_from_d3(pdev, wake);
5549 pci_set_power_state(pdev, PCI_D3hot);
5550 }
5551
5552 static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
5553 bool wake)
5554 {
5555 struct net_device *netdev = pci_get_drvdata(pdev);
5556 struct e1000_adapter *adapter = netdev_priv(netdev);
5557
5558 /*
5559 * The pci-e switch on some quad port adapters will report a
5560 * correctable error when the MAC transitions from D0 to D3. To
5561 * prevent this we need to mask off the correctable errors on the
5562 * downstream port of the pci-e switch.
5563 */
5564 if (adapter->flags & FLAG_IS_QUAD_PORT) {
5565 struct pci_dev *us_dev = pdev->bus->self;
5566 int pos = pci_pcie_cap(us_dev);
5567 u16 devctl;
5568
5569 pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
5570 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
5571 (devctl & ~PCI_EXP_DEVCTL_CERE));
5572
5573 e1000_power_off(pdev, sleep, wake);
5574
5575 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
5576 } else {
5577 e1000_power_off(pdev, sleep, wake);
5578 }
5579 }
5580
5581 #ifdef CONFIG_PCIEASPM
5582 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5583 {
5584 pci_disable_link_state_locked(pdev, state);
5585 }
5586 #else
5587 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5588 {
5589 int pos;
5590 u16 reg16;
5591
5592 /*
5593 * Both device and parent should have the same ASPM setting.
5594 * Disable ASPM in downstream component first and then upstream.
5595 */
5596 pos = pci_pcie_cap(pdev);
5597 	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
5598 reg16 &= ~state;
5599 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
5600
5601 if (!pdev->bus->self)
5602 return;
5603
5604 pos = pci_pcie_cap(pdev->bus->self);
5605 	pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
5606 reg16 &= ~state;
5607 pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
5608 }
5609 #endif
5610 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5611 {
5612 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
5613 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
5614 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
5615
5616 __e1000e_disable_aspm(pdev, state);
5617 }
5618
5619 #ifdef CONFIG_PM
5620 static bool e1000e_pm_ready(struct e1000_adapter *adapter)
5621 {
5622 return !!adapter->tx_ring->buffer_info;
5623 }
5624
5625 static int __e1000_resume(struct pci_dev *pdev)
5626 {
5627 struct net_device *netdev = pci_get_drvdata(pdev);
5628 struct e1000_adapter *adapter = netdev_priv(netdev);
5629 struct e1000_hw *hw = &adapter->hw;
5630 u16 aspm_disable_flag = 0;
5631 u32 err;
5632
5633 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
5634 aspm_disable_flag = PCIE_LINK_STATE_L0S;
5635 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5636 aspm_disable_flag |= PCIE_LINK_STATE_L1;
5637 if (aspm_disable_flag)
5638 e1000e_disable_aspm(pdev, aspm_disable_flag);
5639
5640 pci_set_power_state(pdev, PCI_D0);
5641 pci_restore_state(pdev);
5642 pci_save_state(pdev);
5643
5644 e1000e_set_interrupt_capability(adapter);
5645 if (netif_running(netdev)) {
5646 err = e1000_request_irq(adapter);
5647 if (err)
5648 return err;
5649 }
5650
5651 if (hw->mac.type == e1000_pch2lan)
5652 e1000_resume_workarounds_pchlan(&adapter->hw);
5653
5654 e1000e_power_up_phy(adapter);
5655
5656 /* report the system wakeup cause from S3/S4 */
5657 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5658 u16 phy_data;
5659
5660 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
5661 if (phy_data) {
5662 e_info("PHY Wakeup cause - %s\n",
5663 phy_data & E1000_WUS_EX ? "Unicast Packet" :
5664 phy_data & E1000_WUS_MC ? "Multicast Packet" :
5665 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
5666 phy_data & E1000_WUS_MAG ? "Magic Packet" :
5667 phy_data & E1000_WUS_LNKC ?
5668 "Link Status Change" : "other");
5669 }
5670 e1e_wphy(&adapter->hw, BM_WUS, ~0);
5671 } else {
5672 u32 wus = er32(WUS);
5673 if (wus) {
5674 e_info("MAC Wakeup cause - %s\n",
5675 wus & E1000_WUS_EX ? "Unicast Packet" :
5676 wus & E1000_WUS_MC ? "Multicast Packet" :
5677 wus & E1000_WUS_BC ? "Broadcast Packet" :
5678 wus & E1000_WUS_MAG ? "Magic Packet" :
5679 wus & E1000_WUS_LNKC ? "Link Status Change" :
5680 "other");
5681 }
5682 ew32(WUS, ~0);
5683 }
5684
5685 e1000e_reset(adapter);
5686
5687 e1000_init_manageability_pt(adapter);
5688
5689 if (netif_running(netdev))
5690 e1000e_up(adapter);
5691
5692 netif_device_attach(netdev);
5693
5694 /*
5695 * If the controller has AMT, do not set DRV_LOAD until the interface
5696 * is up. For all other cases, let the f/w know that the h/w is now
5697 * under the control of the driver.
5698 */
5699 if (!(adapter->flags & FLAG_HAS_AMT))
5700 e1000e_get_hw_control(adapter);
5701
5702 return 0;
5703 }
5704
5705 #ifdef CONFIG_PM_SLEEP
5706 static int e1000_suspend(struct device *dev)
5707 {
5708 struct pci_dev *pdev = to_pci_dev(dev);
5709 int retval;
5710 bool wake;
5711
5712 retval = __e1000_shutdown(pdev, &wake, false);
5713 if (!retval)
5714 e1000_complete_shutdown(pdev, true, wake);
5715
5716 return retval;
5717 }
5718
5719 static int e1000_resume(struct device *dev)
5720 {
5721 struct pci_dev *pdev = to_pci_dev(dev);
5722 struct net_device *netdev = pci_get_drvdata(pdev);
5723 struct e1000_adapter *adapter = netdev_priv(netdev);
5724
5725 if (e1000e_pm_ready(adapter))
5726 adapter->idle_check = true;
5727
5728 return __e1000_resume(pdev);
5729 }
5730 #endif /* CONFIG_PM_SLEEP */
5731
5732 #ifdef CONFIG_PM_RUNTIME
5733 static int e1000_runtime_suspend(struct device *dev)
5734 {
5735 struct pci_dev *pdev = to_pci_dev(dev);
5736 struct net_device *netdev = pci_get_drvdata(pdev);
5737 struct e1000_adapter *adapter = netdev_priv(netdev);
5738
5739 if (e1000e_pm_ready(adapter)) {
5740 bool wake;
5741
5742 __e1000_shutdown(pdev, &wake, true);
5743 }
5744
5745 return 0;
5746 }
5747
5748 static int e1000_idle(struct device *dev)
5749 {
5750 struct pci_dev *pdev = to_pci_dev(dev);
5751 struct net_device *netdev = pci_get_drvdata(pdev);
5752 struct e1000_adapter *adapter = netdev_priv(netdev);
5753
5754 if (!e1000e_pm_ready(adapter))
5755 return 0;
5756
5757 if (adapter->idle_check) {
5758 adapter->idle_check = false;
5759 if (!e1000e_has_link(adapter))
5760 pm_schedule_suspend(dev, MSEC_PER_SEC);
5761 }
5762
5763 return -EBUSY;
5764 }
5765
5766 static int e1000_runtime_resume(struct device *dev)
5767 {
5768 struct pci_dev *pdev = to_pci_dev(dev);
5769 struct net_device *netdev = pci_get_drvdata(pdev);
5770 struct e1000_adapter *adapter = netdev_priv(netdev);
5771
5772 if (!e1000e_pm_ready(adapter))
5773 return 0;
5774
5775 adapter->idle_check = !dev->power.runtime_auto;
5776 return __e1000_resume(pdev);
5777 }
5778 #endif /* CONFIG_PM_RUNTIME */
5779 #endif /* CONFIG_PM */
5780
5781 static void e1000_shutdown(struct pci_dev *pdev)
5782 {
5783 bool wake = false;
5784
5785 __e1000_shutdown(pdev, &wake, false);
5786
5787 if (system_state == SYSTEM_POWER_OFF)
5788 e1000_complete_shutdown(pdev, false, wake);
5789 }
5790
5791 #ifdef CONFIG_NET_POLL_CONTROLLER
5792
5793 static irqreturn_t e1000_intr_msix(int irq, void *data)
5794 {
5795 struct net_device *netdev = data;
5796 struct e1000_adapter *adapter = netdev_priv(netdev);
5797
5798 if (adapter->msix_entries) {
5799 int vector, msix_irq;
5800
5801 vector = 0;
5802 msix_irq = adapter->msix_entries[vector].vector;
5803 disable_irq(msix_irq);
5804 e1000_intr_msix_rx(msix_irq, netdev);
5805 enable_irq(msix_irq);
5806
5807 vector++;
5808 msix_irq = adapter->msix_entries[vector].vector;
5809 disable_irq(msix_irq);
5810 e1000_intr_msix_tx(msix_irq, netdev);
5811 enable_irq(msix_irq);
5812
5813 vector++;
5814 msix_irq = adapter->msix_entries[vector].vector;
5815 disable_irq(msix_irq);
5816 e1000_msix_other(msix_irq, netdev);
5817 enable_irq(msix_irq);
5818 }
5819
5820 return IRQ_HANDLED;
5821 }
5822
5823 /*
5824 * Polling 'interrupt' - used by things like netconsole to send skbs
5825 * without having to re-enable interrupts. It's not called while
5826 * the interrupt routine is executing.
5827 */
5828 static void e1000_netpoll(struct net_device *netdev)
5829 {
5830 struct e1000_adapter *adapter = netdev_priv(netdev);
5831
5832 switch (adapter->int_mode) {
5833 case E1000E_INT_MODE_MSIX:
5834 e1000_intr_msix(adapter->pdev->irq, netdev);
5835 break;
5836 case E1000E_INT_MODE_MSI:
5837 disable_irq(adapter->pdev->irq);
5838 e1000_intr_msi(adapter->pdev->irq, netdev);
5839 enable_irq(adapter->pdev->irq);
5840 break;
5841 default: /* E1000E_INT_MODE_LEGACY */
5842 disable_irq(adapter->pdev->irq);
5843 e1000_intr(adapter->pdev->irq, netdev);
5844 enable_irq(adapter->pdev->irq);
5845 break;
5846 }
5847 }
5848 #endif
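/*
 * Illustrative only: the poll controller above is what lets netconsole log
 * over this NIC with interrupts disabled. Example from the kernel's
 * netconsole documentation (addresses and interface name hypothetical):
 *
 *	modprobe netconsole netconsole=4444@10.0.0.1/eth1,9353@10.0.0.2/12:34:56:78:9a:bc
 */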
5849
5850 /**
5851 * e1000_io_error_detected - called when PCI error is detected
5852 * @pdev: Pointer to PCI device
5853 * @state: The current pci connection state
5854 *
5855 * This function is called after a PCI bus error affecting
5856 * this device has been detected.
5857 */
5858 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5859 pci_channel_state_t state)
5860 {
5861 struct net_device *netdev = pci_get_drvdata(pdev);
5862 struct e1000_adapter *adapter = netdev_priv(netdev);
5863
5864 netif_device_detach(netdev);
5865
5866 if (state == pci_channel_io_perm_failure)
5867 return PCI_ERS_RESULT_DISCONNECT;
5868
5869 if (netif_running(netdev))
5870 e1000e_down(adapter);
5871 pci_disable_device(pdev);
5872
5873 	/* Request a slot reset. */
5874 return PCI_ERS_RESULT_NEED_RESET;
5875 }
5876
5877 /**
5878 * e1000_io_slot_reset - called after the pci bus has been reset.
5879 * @pdev: Pointer to PCI device
5880 *
5881 * Restart the card from scratch, as if from a cold-boot. Implementation
5882 * resembles the first-half of the e1000_resume routine.
5883 */
5884 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5885 {
5886 struct net_device *netdev = pci_get_drvdata(pdev);
5887 struct e1000_adapter *adapter = netdev_priv(netdev);
5888 struct e1000_hw *hw = &adapter->hw;
5889 u16 aspm_disable_flag = 0;
5890 int err;
5891 pci_ers_result_t result;
5892
5893 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
5894 aspm_disable_flag = PCIE_LINK_STATE_L0S;
5895 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5896 aspm_disable_flag |= PCIE_LINK_STATE_L1;
5897 if (aspm_disable_flag)
5898 e1000e_disable_aspm(pdev, aspm_disable_flag);
5899
5900 err = pci_enable_device_mem(pdev);
5901 if (err) {
5902 dev_err(&pdev->dev,
5903 "Cannot re-enable PCI device after reset.\n");
5904 result = PCI_ERS_RESULT_DISCONNECT;
5905 } else {
5906 pci_set_master(pdev);
5907 pdev->state_saved = true;
5908 pci_restore_state(pdev);
5909
5910 pci_enable_wake(pdev, PCI_D3hot, 0);
5911 pci_enable_wake(pdev, PCI_D3cold, 0);
5912
5913 e1000e_reset(adapter);
5914 ew32(WUS, ~0);
5915 result = PCI_ERS_RESULT_RECOVERED;
5916 }
5917
5918 pci_cleanup_aer_uncorrect_error_status(pdev);
5919
5920 return result;
5921 }
5922
5923 /**
5924 * e1000_io_resume - called when traffic can start flowing again.
5925 * @pdev: Pointer to PCI device
5926 *
5927 * This callback is called when the error recovery driver tells us that
5928  * it's OK to resume normal operation. Implementation resembles the
5929 * second-half of the e1000_resume routine.
5930 */
5931 static void e1000_io_resume(struct pci_dev *pdev)
5932 {
5933 struct net_device *netdev = pci_get_drvdata(pdev);
5934 struct e1000_adapter *adapter = netdev_priv(netdev);
5935
5936 e1000_init_manageability_pt(adapter);
5937
5938 if (netif_running(netdev)) {
5939 if (e1000e_up(adapter)) {
5940 dev_err(&pdev->dev,
5941 "can't bring device back up after reset\n");
5942 return;
5943 }
5944 }
5945
5946 netif_device_attach(netdev);
5947
5948 /*
5949 * If the controller has AMT, do not set DRV_LOAD until the interface
5950 * is up. For all other cases, let the f/w know that the h/w is now
5951 * under the control of the driver.
5952 */
5953 if (!(adapter->flags & FLAG_HAS_AMT))
5954 e1000e_get_hw_control(adapter);
5955
5956 }
5957
5958 static void e1000_print_device_info(struct e1000_adapter *adapter)
5959 {
5960 struct e1000_hw *hw = &adapter->hw;
5961 struct net_device *netdev = adapter->netdev;
5962 u32 ret_val;
5963 u8 pba_str[E1000_PBANUM_LENGTH];
5964
5965 /* print bus type/speed/width info */
5966 e_info("(PCI Express:2.5GT/s:%s) %pM\n",
5967 /* bus width */
5968 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
5969 "Width x1"),
5970 /* MAC address */
5971 netdev->dev_addr);
5972 e_info("Intel(R) PRO/%s Network Connection\n",
5973 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
5974 ret_val = e1000_read_pba_string_generic(hw, pba_str,
5975 E1000_PBANUM_LENGTH);
5976 if (ret_val)
5977 strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
5978 e_info("MAC: %d, PHY: %d, PBA No: %s\n",
5979 hw->mac.type, hw->phy.type, pba_str);
5980 }
5981
5982 static void e1000_eeprom_checks(struct e1000_adapter *adapter)
5983 {
5984 struct e1000_hw *hw = &adapter->hw;
5985 int ret_val;
5986 u16 buf = 0;
5987
5988 if (hw->mac.type != e1000_82573)
5989 return;
5990
5991 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
5992 le16_to_cpus(&buf);
5993 if (!ret_val && (!(buf & (1 << 0)))) {
5994 /* Deep Smart Power Down (DSPD) */
5995 dev_warn(&adapter->pdev->dev,
5996 "Warning: detected DSPD enabled in EEPROM\n");
5997 }
5998 }
5999
6000 static int e1000_set_features(struct net_device *netdev,
6001 netdev_features_t features)
6002 {
6003 struct e1000_adapter *adapter = netdev_priv(netdev);
6004 netdev_features_t changed = features ^ netdev->features;
6005
6006 if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
6007 adapter->flags |= FLAG_TSO_FORCE;
6008
6009 if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
6010 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
6011 NETIF_F_RXALL)))
6012 return 0;
6013
6014 if (changed & NETIF_F_RXFCS) {
6015 if (features & NETIF_F_RXFCS) {
6016 adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
6017 } else {
6018 /* We need to take it back to defaults, which might mean
6019 * stripping is still disabled at the adapter level.
6020 */
6021 if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
6022 adapter->flags2 |= FLAG2_CRC_STRIPPING;
6023 else
6024 adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
6025 }
6026 }
6027
6028 netdev->features = features;
6029
6030 if (netif_running(netdev))
6031 e1000e_reinit_locked(adapter);
6032 else
6033 e1000e_reset(adapter);
6034
6035 return 0;
6036 }
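/*
 * Illustrative only: this callback runs when userspace toggles offloads,
 * e.g. via ethtool (feature strings as exposed by this kernel generation,
 * interface name hypothetical):
 *
 *	ethtool -K eth0 rx-fcs on	# clears FLAG2_CRC_STRIPPING above
 *	ethtool -K eth0 rx-all on	# passes bad frames up the stack
 */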
6037
6038 static const struct net_device_ops e1000e_netdev_ops = {
6039 .ndo_open = e1000_open,
6040 .ndo_stop = e1000_close,
6041 .ndo_start_xmit = e1000_xmit_frame,
6042 .ndo_get_stats64 = e1000e_get_stats64,
6043 .ndo_set_rx_mode = e1000e_set_rx_mode,
6044 .ndo_set_mac_address = e1000_set_mac,
6045 .ndo_change_mtu = e1000_change_mtu,
6046 .ndo_do_ioctl = e1000_ioctl,
6047 .ndo_tx_timeout = e1000_tx_timeout,
6048 .ndo_validate_addr = eth_validate_addr,
6049
6050 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
6051 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
6052 #ifdef CONFIG_NET_POLL_CONTROLLER
6053 .ndo_poll_controller = e1000_netpoll,
6054 #endif
6055 .ndo_set_features = e1000_set_features,
6056 };
6057
6058 /**
6059 * e1000_probe - Device Initialization Routine
6060 * @pdev: PCI device information struct
6061 * @ent: entry in e1000_pci_tbl
6062 *
6063 * Returns 0 on success, negative on failure
6064 *
6065 * e1000_probe initializes an adapter identified by a pci_dev structure.
6066 * The OS initialization, configuring of the adapter private structure,
6067 * and a hardware reset occur.
6068 **/
6069 static int __devinit e1000_probe(struct pci_dev *pdev,
6070 const struct pci_device_id *ent)
6071 {
6072 struct net_device *netdev;
6073 struct e1000_adapter *adapter;
6074 struct e1000_hw *hw;
6075 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
6076 resource_size_t mmio_start, mmio_len;
6077 resource_size_t flash_start, flash_len;
6078 static int cards_found;
6079 u16 aspm_disable_flag = 0;
6080 int i, err, pci_using_dac;
6081 u16 eeprom_data = 0;
6082 u16 eeprom_apme_mask = E1000_EEPROM_APME;
6083
6084 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
6085 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6086 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
6087 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6088 if (aspm_disable_flag)
6089 e1000e_disable_aspm(pdev, aspm_disable_flag);
6090
6091 err = pci_enable_device_mem(pdev);
6092 if (err)
6093 return err;
6094
6095 pci_using_dac = 0;
6096 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6097 if (!err) {
6098 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
6099 if (!err)
6100 pci_using_dac = 1;
6101 } else {
6102 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6103 if (err) {
6104 err = dma_set_coherent_mask(&pdev->dev,
6105 DMA_BIT_MASK(32));
6106 if (err) {
6107 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
6108 goto err_dma;
6109 }
6110 }
6111 }
6112
6113 err = pci_request_selected_regions_exclusive(pdev,
6114 pci_select_bars(pdev, IORESOURCE_MEM),
6115 e1000e_driver_name);
6116 if (err)
6117 goto err_pci_reg;
6118
6119 /* AER (Advanced Error Reporting) hooks */
6120 pci_enable_pcie_error_reporting(pdev);
6121
6122 pci_set_master(pdev);
6123 /* PCI config space info */
6124 err = pci_save_state(pdev);
6125 if (err)
6126 goto err_alloc_etherdev;
6127
6128 err = -ENOMEM;
6129 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
6130 if (!netdev)
6131 goto err_alloc_etherdev;
6132
6133 SET_NETDEV_DEV(netdev, &pdev->dev);
6134
6135 netdev->irq = pdev->irq;
6136
6137 pci_set_drvdata(pdev, netdev);
6138 adapter = netdev_priv(netdev);
6139 hw = &adapter->hw;
6140 adapter->netdev = netdev;
6141 adapter->pdev = pdev;
6142 adapter->ei = ei;
6143 adapter->pba = ei->pba;
6144 adapter->flags = ei->flags;
6145 adapter->flags2 = ei->flags2;
6146 adapter->hw.adapter = adapter;
6147 adapter->hw.mac.type = ei->mac;
6148 adapter->max_hw_frame_size = ei->max_hw_frame_size;
6149 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6150
6151 mmio_start = pci_resource_start(pdev, 0);
6152 mmio_len = pci_resource_len(pdev, 0);
6153
6154 err = -EIO;
6155 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
6156 if (!adapter->hw.hw_addr)
6157 goto err_ioremap;
6158
6159 if ((adapter->flags & FLAG_HAS_FLASH) &&
6160 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
6161 flash_start = pci_resource_start(pdev, 1);
6162 flash_len = pci_resource_len(pdev, 1);
6163 adapter->hw.flash_address = ioremap(flash_start, flash_len);
6164 if (!adapter->hw.flash_address)
6165 goto err_flashmap;
6166 }
6167
6168 /* construct the net_device struct */
6169 netdev->netdev_ops = &e1000e_netdev_ops;
6170 e1000e_set_ethtool_ops(netdev);
6171 netdev->watchdog_timeo = 5 * HZ;
6172 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
6173 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
6174
6175 netdev->mem_start = mmio_start;
6176 netdev->mem_end = mmio_start + mmio_len;
6177
6178 adapter->bd_number = cards_found++;
6179
6180 e1000e_check_options(adapter);
6181
6182 /* setup adapter struct */
6183 err = e1000_sw_init(adapter);
6184 if (err)
6185 goto err_sw_init;
6186
6187 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6188 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
6189 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6190
6191 err = ei->get_variants(adapter);
6192 if (err)
6193 goto err_hw_init;
6194
6195 if ((adapter->flags & FLAG_IS_ICH) &&
6196 (adapter->flags & FLAG_READ_ONLY_NVM))
6197 e1000e_write_protect_nvm_ich8lan(&adapter->hw);
6198
6199 hw->mac.ops.get_bus_info(&adapter->hw);
6200
6201 adapter->hw.phy.autoneg_wait_to_complete = 0;
6202
6203 /* Copper options */
6204 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
6205 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6206 adapter->hw.phy.disable_polarity_correction = 0;
6207 adapter->hw.phy.ms_type = e1000_ms_hw_default;
6208 }
6209
6210 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
6211 e_info("PHY reset is blocked due to SOL/IDER session.\n");
6212
6213 /* Set initial default active device features */
6214 netdev->features = (NETIF_F_SG |
6215 NETIF_F_HW_VLAN_RX |
6216 NETIF_F_HW_VLAN_TX |
6217 NETIF_F_TSO |
6218 NETIF_F_TSO6 |
6219 NETIF_F_RXHASH |
6220 NETIF_F_RXCSUM |
6221 NETIF_F_HW_CSUM);
6222
6223 /* Set user-changeable features (subset of all device features) */
6224 netdev->hw_features = netdev->features;
6225 netdev->hw_features |= NETIF_F_RXFCS;
6226 netdev->priv_flags |= IFF_SUPP_NOFCS;
6227 netdev->hw_features |= NETIF_F_RXALL;
6228
6229 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
6230 netdev->features |= NETIF_F_HW_VLAN_FILTER;
6231
6232 netdev->vlan_features |= (NETIF_F_SG |
6233 NETIF_F_TSO |
6234 NETIF_F_TSO6 |
6235 NETIF_F_HW_CSUM);
6236
6237 netdev->priv_flags |= IFF_UNICAST_FLT;
6238
6239 if (pci_using_dac) {
6240 netdev->features |= NETIF_F_HIGHDMA;
6241 netdev->vlan_features |= NETIF_F_HIGHDMA;
6242 }
6243
6244 if (e1000e_enable_mng_pass_thru(&adapter->hw))
6245 adapter->flags |= FLAG_MNG_PT_ENABLED;
6246
6247 /*
6248 * before reading the NVM, reset the controller to
6249 * put the device in a known good starting state
6250 */
6251 adapter->hw.mac.ops.reset_hw(&adapter->hw);
6252
6253 /*
6254 * systems with ASPM and others may see the checksum fail on the first
6255 * attempt. Let's give it a few tries
6256 */
6257 for (i = 0;; i++) {
6258 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
6259 break;
6260 if (i == 2) {
6261 e_err("The NVM Checksum Is Not Valid\n");
6262 err = -EIO;
6263 goto err_eeprom;
6264 }
6265 }
6266
6267 e1000_eeprom_checks(adapter);
6268
6269 /* copy the MAC address */
6270 if (e1000e_read_mac_addr(&adapter->hw))
6271 e_err("NVM Read Error while reading MAC address\n");
6272
6273 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
6274 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
6275
6276 if (!is_valid_ether_addr(netdev->perm_addr)) {
6277 e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
6278 err = -EIO;
6279 goto err_eeprom;
6280 }
6281
6282 init_timer(&adapter->watchdog_timer);
6283 adapter->watchdog_timer.function = e1000_watchdog;
6284 adapter->watchdog_timer.data = (unsigned long) adapter;
6285
6286 init_timer(&adapter->phy_info_timer);
6287 adapter->phy_info_timer.function = e1000_update_phy_info;
6288 adapter->phy_info_timer.data = (unsigned long) adapter;
6289
6290 INIT_WORK(&adapter->reset_task, e1000_reset_task);
6291 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
6292 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
6293 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
6294 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
6295
6296 /* Initialize link parameters. User can change them with ethtool */
6297 adapter->hw.mac.autoneg = 1;
6298 adapter->fc_autoneg = true;
6299 adapter->hw.fc.requested_mode = e1000_fc_default;
6300 adapter->hw.fc.current_mode = e1000_fc_default;
6301 adapter->hw.phy.autoneg_advertised = 0x2f;
6302
6303 /* ring size defaults */
6304 adapter->rx_ring->count = E1000_DEFAULT_RXD;
6305 adapter->tx_ring->count = E1000_DEFAULT_TXD;
6306
6307 /*
6308 * Initial Wake on LAN setting - If APM wake is enabled in
6309 * the EEPROM, enable the ACPI Magic Packet filter
6310 */
6311 if (adapter->flags & FLAG_APME_IN_WUC) {
6312 /* APME bit in EEPROM is mapped to WUC.APME */
6313 eeprom_data = er32(WUC);
6314 eeprom_apme_mask = E1000_WUC_APME;
6315 if ((hw->mac.type > e1000_ich10lan) &&
6316 (eeprom_data & E1000_WUC_PHY_WAKE))
6317 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
6318 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
6319 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
6320 (adapter->hw.bus.func == 1))
6321 e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B,
6322 1, &eeprom_data);
6323 else
6324 e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A,
6325 1, &eeprom_data);
6326 }
6327
6328 /* fetch WoL from EEPROM */
6329 if (eeprom_data & eeprom_apme_mask)
6330 adapter->eeprom_wol |= E1000_WUFC_MAG;
6331
6332 /*
6333 * now that we have the eeprom settings, apply the special cases
6334 * where the eeprom may be wrong or the board simply won't support
6335 * wake on lan on a particular port
6336 */
6337 if (!(adapter->flags & FLAG_HAS_WOL))
6338 adapter->eeprom_wol = 0;
6339
6340 /* initialize the wol settings based on the eeprom settings */
6341 adapter->wol = adapter->eeprom_wol;
6342 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
6343
6344 /* save off EEPROM version number */
6345 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
6346
6347 /* reset the hardware with the new settings */
6348 e1000e_reset(adapter);
6349
6350 /*
6351 * If the controller has AMT, do not set DRV_LOAD until the interface
6352 * is up. For all other cases, let the f/w know that the h/w is now
6353 * under the control of the driver.
6354 */
6355 if (!(adapter->flags & FLAG_HAS_AMT))
6356 e1000e_get_hw_control(adapter);
6357
6358 strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
6359 err = register_netdev(netdev);
6360 if (err)
6361 goto err_register;
6362
6363 /* carrier off reporting is important to ethtool even BEFORE open */
6364 netif_carrier_off(netdev);
6365
6366 e1000_print_device_info(adapter);
6367
6368 if (pci_dev_run_wake(pdev))
6369 pm_runtime_put_noidle(&pdev->dev);
6370
6371 return 0;
6372
6373 err_register:
6374 if (!(adapter->flags & FLAG_HAS_AMT))
6375 e1000e_release_hw_control(adapter);
6376 err_eeprom:
6377 if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
6378 e1000_phy_hw_reset(&adapter->hw);
6379 err_hw_init:
6380 kfree(adapter->tx_ring);
6381 kfree(adapter->rx_ring);
6382 err_sw_init:
6383 if (adapter->hw.flash_address)
6384 iounmap(adapter->hw.flash_address);
6385 e1000e_reset_interrupt_capability(adapter);
6386 err_flashmap:
6387 iounmap(adapter->hw.hw_addr);
6388 err_ioremap:
6389 free_netdev(netdev);
6390 err_alloc_etherdev:
6391 pci_release_selected_regions(pdev,
6392 pci_select_bars(pdev, IORESOURCE_MEM));
6393 err_pci_reg:
6394 err_dma:
6395 pci_disable_device(pdev);
6396 return err;
6397 }
6398
6399 /**
6400 * e1000_remove - Device Removal Routine
6401 * @pdev: PCI device information struct
6402 *
6403 * e1000_remove is called by the PCI subsystem to alert the driver
6404  * that it should release a PCI device. This could be caused by a
6405 * Hot-Plug event, or because the driver is going to be removed from
6406 * memory.
6407 **/
6408 static void __devexit e1000_remove(struct pci_dev *pdev)
6409 {
6410 struct net_device *netdev = pci_get_drvdata(pdev);
6411 struct e1000_adapter *adapter = netdev_priv(netdev);
6412 bool down = test_bit(__E1000_DOWN, &adapter->state);
6413
6414 /*
6415 * The timers may be rescheduled, so explicitly disable them
6416 * from being rescheduled.
6417 */
6418 if (!down)
6419 set_bit(__E1000_DOWN, &adapter->state);
6420 del_timer_sync(&adapter->watchdog_timer);
6421 del_timer_sync(&adapter->phy_info_timer);
6422
6423 cancel_work_sync(&adapter->reset_task);
6424 cancel_work_sync(&adapter->watchdog_task);
6425 cancel_work_sync(&adapter->downshift_task);
6426 cancel_work_sync(&adapter->update_phy_task);
6427 cancel_work_sync(&adapter->print_hang_task);
6428
6429 if (!(netdev->flags & IFF_UP))
6430 e1000_power_down_phy(adapter);
6431
6432 /* Don't lie to e1000_close() down the road. */
6433 if (!down)
6434 clear_bit(__E1000_DOWN, &adapter->state);
6435 unregister_netdev(netdev);
6436
6437 if (pci_dev_run_wake(pdev))
6438 pm_runtime_get_noresume(&pdev->dev);
6439
6440 /*
6441 * Release control of h/w to f/w. If f/w is AMT enabled, this
6442 * would have already happened in close and is redundant.
6443 */
6444 e1000e_release_hw_control(adapter);
6445
6446 e1000e_reset_interrupt_capability(adapter);
6447 kfree(adapter->tx_ring);
6448 kfree(adapter->rx_ring);
6449
6450 iounmap(adapter->hw.hw_addr);
6451 if (adapter->hw.flash_address)
6452 iounmap(adapter->hw.flash_address);
6453 pci_release_selected_regions(pdev,
6454 pci_select_bars(pdev, IORESOURCE_MEM));
6455
6456 free_netdev(netdev);
6457
6458 /* AER disable */
6459 pci_disable_pcie_error_reporting(pdev);
6460
6461 pci_disable_device(pdev);
6462 }
6463
6464 /* PCI Error Recovery (ERS) */
6465 static struct pci_error_handlers e1000_err_handler = {
6466 .error_detected = e1000_io_error_detected,
6467 .slot_reset = e1000_io_slot_reset,
6468 .resume = e1000_io_resume,
6469 };
6470
6471 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
6472 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
6473 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
6474 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
6475 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
6476 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
6477 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
6478 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
6479 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
6480 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
6481
6482 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
6483 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
6484 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
6485 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
6486
6487 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
6488 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
6489 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
6490
6491 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
6492 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
6493 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
6494
6495 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
6496 board_80003es2lan },
6497 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
6498 board_80003es2lan },
6499 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
6500 board_80003es2lan },
6501 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
6502 board_80003es2lan },
6503
6504 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
6505 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
6506 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
6507 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
6508 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
6509 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
6510 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
6511 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
6512
6513 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
6514 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
6515 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
6516 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
6517 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
6518 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
6519 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
6520 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
6521 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
6522
6523 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
6524 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
6525 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
6526
6527 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
6528 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
6529 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
6530
6531 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
6532 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
6533 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
6534 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
6535
6536 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
6537 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
6538
6539 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
6540 };
6541 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
6542
6543 #ifdef CONFIG_PM
6544 static const struct dev_pm_ops e1000_pm_ops = {
6545 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
6546 SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
6547 e1000_runtime_resume, e1000_idle)
6548 };
6549 #endif
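/*
 * For reference (behavior of the PM macros above, as defined in this kernel
 * generation): SET_SYSTEM_SLEEP_PM_OPS() wires e1000_suspend()/e1000_resume()
 * into the system sleep slots (.suspend/.resume and the hibernation
 * .freeze/.thaw/.poweroff/.restore), while SET_RUNTIME_PM_OPS() fills
 * .runtime_suspend, .runtime_resume and .runtime_idle, so the PM core calls
 * the handlers defined earlier in this file.
 */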
6550
6551 /* PCI Device API Driver */
6552 static struct pci_driver e1000_driver = {
6553 .name = e1000e_driver_name,
6554 .id_table = e1000_pci_tbl,
6555 .probe = e1000_probe,
6556 .remove = __devexit_p(e1000_remove),
6557 #ifdef CONFIG_PM
6558 .driver = {
6559 .pm = &e1000_pm_ops,
6560 },
6561 #endif
6562 .shutdown = e1000_shutdown,
6563 .err_handler = &e1000_err_handler
6564 };
6565
6566 /**
6567 * e1000_init_module - Driver Registration Routine
6568 *
6569 * e1000_init_module is the first routine called when the driver is
6570 * loaded. All it does is register with the PCI subsystem.
6571 **/
6572 static int __init e1000_init_module(void)
6573 {
6574 int ret;
6575 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
6576 e1000e_driver_version);
6577 pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
6578 ret = pci_register_driver(&e1000_driver);
6579
6580 return ret;
6581 }
6582 module_init(e1000_init_module);
6583
6584 /**
6585 * e1000_exit_module - Driver Exit Cleanup Routine
6586 *
6587 * e1000_exit_module is called just before the driver is removed
6588 * from memory.
6589 **/
6590 static void __exit e1000_exit_module(void)
6591 {
6592 pci_unregister_driver(&e1000_driver);
6593 }
6594 module_exit(e1000_exit_module);
6595
6596
6597 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
6598 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
6599 MODULE_LICENSE("GPL");
6600 MODULE_VERSION(DRV_VERSION);
6601
6602 /* netdev.c */
6603