1 /*******************************************************************************
2
3
4 Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
10
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *******************************************************************************/
27
28 /**********************************************************************
29 * *
30 * INTEL CORPORATION *
31 * *
32 * This software is supplied under the terms of the license included *
33 * above. All use of this driver must be in accordance with the terms *
34 * of that license. *
35 * *
36 * Module Name: e100_main.c *
37 * *
38 * Abstract: Functions for the driver entry points like load, *
39 * unload, open and close. All board specific calls made *
40 * by the network interface section of the driver. *
41 * *
42 * Environment: This file is intended to be specific to the Linux *
43 * operating system. *
44 * *
45 **********************************************************************/
46
47 /* Change Log
48 *
49 * 2.3.40 2/13/04
50 * o Updated microcode for D102 rev 15 and rev 16 to include fix
51 * for TCO issue. NFS packets would be misinterpreted as TCO packets
52 * and incorrectly routed to the BMC over SMBus. The microcode fix
53 * checks the fragmented IP bit in the NFS/UDP header to distinguish
54 * between NFS and TCO.
55 * o Bug fix: don't strip MAC header count from Rx byte count.
56 * Ben Greear (greear@candeltech.com).
57 *
58 * 2.3.38 12/14/03
59 * o Added netpoll support.
60 * o Added ICH6 device ID support
61 * o Moved to 2.6 APIs: pci_name() and free_netdev().
62 * o Removed some __devinit from some functions that shouldn't be marked
63 * as such (Anton Blanchard [anton@samba.org]).
64 *
65 * 2.3.33 10/21/03
66 */
67
68 #include <linux/config.h>
69 #include <net/checksum.h>
70 #include <linux/tcp.h>
71 #include <linux/udp.h>
72 #include "e100.h"
73 #include "e100_ucode.h"
74 #include "e100_config.h"
75 #include "e100_phy.h"
76
77 extern void e100_force_speed_duplex_to_phy(struct e100_private *bdp);
78
79 static char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
80 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
81 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
82 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
83 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
84 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
85 "tx_heartbeat_errors", "tx_window_errors",
86 /* device-specific stats */
87 "tx_late_collision_errors", "tx_deferred",
88 "tx_single_collisions", "tx_multi_collisions",
89 "rx_collision_detected_errors", "tx_flow_control_pause",
90 "rx_flow_control_pause", "rx_flow_control_unsupported",
91 "tx_tco_packets", "rx_tco_packets",
92 };
93 #define E100_NET_STATS_LEN 21
94 #define E100_STATS_LEN (sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN)
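/* Of the 31 strings above, the first E100_NET_STATS_LEN (21) name generic
 * struct net_device_stats fields; the rest are the device-specific counters.
 * E100_STATS_LEN is the total string count reported through ethtool.
 */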
95
96 static int e100_do_ethtool_ioctl(struct net_device *, struct ifreq *);
97 static void e100_get_speed_duplex_caps(struct e100_private *);
98 static int e100_ethtool_get_settings(struct net_device *, struct ifreq *);
99 static int e100_ethtool_set_settings(struct net_device *, struct ifreq *);
100
101 static int e100_ethtool_get_drvinfo(struct net_device *, struct ifreq *);
102 static int e100_ethtool_eeprom(struct net_device *, struct ifreq *);
103
104 #define E100_EEPROM_MAGIC 0x1234
105 static int e100_ethtool_glink(struct net_device *, struct ifreq *);
106 static int e100_ethtool_gregs(struct net_device *, struct ifreq *);
107 static int e100_ethtool_nway_rst(struct net_device *, struct ifreq *);
108 static int e100_ethtool_wol(struct net_device *, struct ifreq *);
109 #ifdef CONFIG_PM
110 static unsigned char e100_setup_filter(struct e100_private *bdp);
111 static void e100_do_wol(struct pci_dev *pcid, struct e100_private *bdp);
112 #endif
113 static u16 e100_get_ip_lbytes(struct net_device *dev);
114 extern void e100_config_wol(struct e100_private *bdp);
115 extern u32 e100_run_diag(struct net_device *dev, u64 *test_info, u32 flags);
116 static int e100_ethtool_test(struct net_device *, struct ifreq *);
117 static int e100_ethtool_gstrings(struct net_device *, struct ifreq *);
118 static char test_strings[][ETH_GSTRING_LEN] = {
119 "Link test (on/offline)",
120 "Eeprom test (on/offline)",
121 "Self test (offline)",
122 "Mac loopback (offline)",
123 "Phy loopback (offline)",
124 "Cable diagnostic (offline)"
125 };
126
127 static int e100_ethtool_led_blink(struct net_device *, struct ifreq *);
128
129 static int e100_mii_ioctl(struct net_device *, struct ifreq *, int);
130
131 static unsigned char e100_delayed_exec_non_cu_cmd(struct e100_private *,
132 nxmit_cb_entry_t *);
133 static void e100_free_nontx_list(struct e100_private *);
134 static void e100_non_tx_background(unsigned long);
135 static inline void e100_tx_skb_free(struct e100_private *bdp, tcb_t *tcb);
136 /* Global Data structures and variables */
137 char e100_copyright[] __devinitdata = "Copyright (c) 2004 Intel Corporation";
138 char e100_driver_version[] = "2.3.43-k1";
139 const char *e100_full_driver_name = "Intel(R) PRO/100 Network Driver";
140 char e100_short_driver_name[] = "e100";
141 static int e100nics = 0;
142 static void e100_vlan_rx_register(struct net_device *netdev, struct vlan_group
143 *grp);
144 static void e100_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
145 static void e100_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
146
147 #ifdef CONFIG_NET_POLL_CONTROLLER
148 /* for netdump / net console */
149 static void e100_netpoll (struct net_device *dev);
150 #endif
151
152 #ifdef CONFIG_PM
153 static int e100_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
154 static int e100_suspend(struct pci_dev *pcid, u32 state);
155 static int e100_resume(struct pci_dev *pcid);
156 static unsigned char e100_asf_enabled(struct e100_private *bdp);
157 struct notifier_block e100_notifier_reboot = {
158 .notifier_call = e100_notify_reboot,
159 .next = NULL,
160 .priority = 0
161 };
162 #endif
163
164 /*********************************************************************/
165 /*! This is a GCC extension to ANSI C.
166 * See the item "Labeled Elements in Initializers" in the section
167 * "Extensions to the C Language Family" of the GCC documentation.
168 *********************************************************************/
169 #define E100_PARAM_INIT { [0 ... E100_MAX_NIC] = -1 }
170
171 /* All parameters are treated the same, as an integer array of values.
172 * This macro just reduces the need to repeat the same declaration code
173 * over and over (plus this helps to avoid typo bugs).
174 */
175 #define E100_PARAM(X, S) \
176 static const int X[E100_MAX_NIC + 1] = E100_PARAM_INIT; \
177 MODULE_PARM(X, "1-" __MODULE_STRING(E100_MAX_NIC) "i"); \
178 MODULE_PARM_DESC(X, S);
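/* For illustration, assuming E100_MAX_NIC is 16, the invocation
 * E100_PARAM(TxDescriptors, "Number of transmit descriptors") below
 * expands to roughly:
 *
 *	static const int TxDescriptors[17] = { [0 ... 16] = -1 };
 *	MODULE_PARM(TxDescriptors, "1-16i");
 *	MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");
 *
 * so every board defaults to -1 ("use the driver default") until overridden,
 * e.g. "modprobe e100 TxDescriptors=64,128".
 */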
179
180 /* ====================================================================== */
181 static u8 e100_D101M_checksum(struct e100_private *, struct sk_buff *);
182 static u8 e100_D102_check_checksum(rfd_t *);
183 static int e100_ioctl(struct net_device *, struct ifreq *, int);
184 static int e100_change_mtu(struct net_device *, int);
185 static int e100_xmit_frame(struct sk_buff *, struct net_device *);
186 static unsigned char e100_init(struct e100_private *);
187 static int e100_set_mac(struct net_device *, void *);
188 struct net_device_stats *e100_get_stats(struct net_device *);
189
190 static irqreturn_t e100intr(int, void *, struct pt_regs *);
191 static void e100_print_brd_conf(struct e100_private *);
192 static void e100_set_multi(struct net_device *);
193
194 static u8 e100_pci_setup(struct pci_dev *, struct e100_private *);
195 static u8 e100_sw_init(struct e100_private *);
196 static void e100_tco_workaround(struct e100_private *);
197 static unsigned char e100_alloc_space(struct e100_private *);
198 static void e100_dealloc_space(struct e100_private *);
199 static int e100_alloc_tcb_pool(struct e100_private *);
200 static void e100_setup_tcb_pool(tcb_t *, unsigned int, struct e100_private *);
201 static void e100_free_tcb_pool(struct e100_private *);
202 static int e100_alloc_rfd_pool(struct e100_private *);
203 static void e100_free_rfd_pool(struct e100_private *);
204
205 static void e100_rd_eaddr(struct e100_private *);
206 static void e100_rd_pwa_no(struct e100_private *);
207 extern u16 e100_eeprom_read(struct e100_private *, u16);
208 extern void e100_eeprom_write_block(struct e100_private *, u16, u16 *, u16);
209 extern u16 e100_eeprom_size(struct e100_private *);
210 u16 e100_eeprom_calculate_chksum(struct e100_private *adapter);
211
212 static unsigned char e100_clr_cntrs(struct e100_private *);
213 static unsigned char e100_load_microcode(struct e100_private *);
214 static unsigned char e100_setup_iaaddr(struct e100_private *, u8 *);
215 static unsigned char e100_update_stats(struct e100_private *bdp);
216
217 static void e100_start_ru(struct e100_private *);
218 static void e100_dump_stats_cntrs(struct e100_private *);
219
220 static void e100_check_options(int board, struct e100_private *bdp);
221 static void e100_set_int_option(int *, int, int, int, int, char *);
222 static void e100_set_bool_option(struct e100_private *bdp, int, u32, int,
223 char *);
224 unsigned char e100_wait_exec_cmplx(struct e100_private *, u32, u8, u8);
225 void e100_exec_cmplx(struct e100_private *, u32, u8);
226
227 /**
228 * e100_get_rx_struct - retrieve a cell to hold an sk_buff from the pool
229 * @bdp: adapter's private data struct
230 *
231 * Returns the new cell to hold sk_buff or %NULL.
232 */
233 static inline struct rx_list_elem *
234 e100_get_rx_struct(struct e100_private *bdp)
235 {
236 struct rx_list_elem *rx_struct = NULL;
237
238 if (!list_empty(&(bdp->rx_struct_pool))) {
239 rx_struct = list_entry(bdp->rx_struct_pool.next,
240 struct rx_list_elem, list_elem);
241 list_del(&(rx_struct->list_elem));
242 }
243
244 return rx_struct;
245 }
246
247 /**
248 * e100_alloc_skb - allocate an skb for the adapter
249 * @bdp: adapter's private data struct
250 *
251 * Allocates an skb with enough room for the rfd and data, and reserves the non-data space.
252 * Returns the new cell with sk_buff or %NULL.
253 */
254 static inline struct rx_list_elem *
255 e100_alloc_skb(struct e100_private *bdp)
256 {
257 struct sk_buff *new_skb;
258 u32 skb_size = sizeof (rfd_t);
259 struct rx_list_elem *rx_struct;
260
261 new_skb = (struct sk_buff *) dev_alloc_skb(skb_size);
262 if (new_skb) {
263 /* The IP data should be
264 DWORD aligned. Since the Ethernet header is 14 bytes long,
265 we need to reserve 2 extra bytes so that the TCP/IP headers
266 will be DWORD aligned. */
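/* A worked example of that arithmetic: the 14-byte Ethernet
   header plus the 2 reserved bytes makes 16, so the IP header
   that follows starts on a 4-byte (DWORD) boundary. */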
267 skb_reserve(new_skb, 2);
268 if ((rx_struct = e100_get_rx_struct(bdp)) == NULL)
269 goto err;
270 rx_struct->skb = new_skb;
271 rx_struct->dma_addr = pci_map_single(bdp->pdev, new_skb->data,
272 sizeof (rfd_t),
273 PCI_DMA_FROMDEVICE);
274 if (!rx_struct->dma_addr)
275 goto err;
276 skb_reserve(new_skb, bdp->rfd_size);
277 return rx_struct;
278 } else {
279 return NULL;
280 }
281
282 err:
283 dev_kfree_skb_irq(new_skb);
284 return NULL;
285 }
286
287 /**
288 * e100_add_skb_to_end - add an skb to the end of our rfd list
289 * @bdp: adapter's private data struct
290 * @rx_struct: rx_list_elem with the new skb
291 *
292 * Adds a newly allocated skb to the end of our rfd list.
293 */
294 inline void
295 e100_add_skb_to_end(struct e100_private *bdp, struct rx_list_elem *rx_struct)
296 {
297 rfd_t *rfdn; /* The new rfd */
298 rfd_t *rfd; /* The old rfd */
299 struct rx_list_elem *rx_struct_last;
300
301 (rx_struct->skb)->dev = bdp->device;
302 rfdn = RFD_POINTER(rx_struct->skb, bdp);
303 rfdn->rfd_header.cb_status = 0;
304 rfdn->rfd_header.cb_cmd = __constant_cpu_to_le16(RFD_EL_BIT);
305 rfdn->rfd_act_cnt = 0;
306 rfdn->rfd_sz = __constant_cpu_to_le16(RFD_DATA_SIZE);
307
308 pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr, bdp->rfd_size,
309 PCI_DMA_TODEVICE);
310
311 if (!list_empty(&(bdp->active_rx_list))) {
312 rx_struct_last = list_entry(bdp->active_rx_list.prev,
313 struct rx_list_elem, list_elem);
314 rfd = RFD_POINTER(rx_struct_last->skb, bdp);
315 pci_dma_sync_single(bdp->pdev, rx_struct_last->dma_addr,
316 4, PCI_DMA_FROMDEVICE);
317 put_unaligned(cpu_to_le32(rx_struct->dma_addr),
318 ((u32 *) (&(rfd->rfd_header.cb_lnk_ptr))));
319
320 pci_dma_sync_single(bdp->pdev, rx_struct_last->dma_addr,
321 8, PCI_DMA_TODEVICE);
322 rfd->rfd_header.cb_cmd &=
323 __constant_cpu_to_le16((u16) ~RFD_EL_BIT);
324
325 pci_dma_sync_single(bdp->pdev, rx_struct_last->dma_addr,
326 4, PCI_DMA_TODEVICE);
327 }
328
329 list_add_tail(&(rx_struct->list_elem), &(bdp->active_rx_list));
330 }
331
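/**
 * e100_alloc_skbs - refill the receive buffer pool
 * @bdp: adapter's private data struct
 *
 * A descriptive sketch of the loop below: bdp->skb_req counts the receive
 * buffers still owed to the hardware, and each successful e100_alloc_skb()
 * plus e100_add_skb_to_end() pair pays one back.
 */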
332 static inline void
333 e100_alloc_skbs(struct e100_private *bdp)
334 {
335 for (; bdp->skb_req > 0; bdp->skb_req--) {
336 struct rx_list_elem *rx_struct;
337
338 if ((rx_struct = e100_alloc_skb(bdp)) == NULL)
339 return;
340
341 e100_add_skb_to_end(bdp, rx_struct);
342 }
343 }
344
345 void e100_tx_srv(struct e100_private *);
346 u32 e100_rx_srv(struct e100_private *);
347
348 void e100_watchdog(struct net_device *);
349 void e100_refresh_txthld(struct e100_private *);
350 void e100_manage_adaptive_ifs(struct e100_private *);
351 void e100_clear_pools(struct e100_private *);
352 static void e100_clear_structs(struct net_device *);
353 static inline tcb_t *e100_prepare_xmit_buff(struct e100_private *,
354 struct sk_buff *);
355 static void e100_set_multi_exec(struct net_device *dev);
356
357 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
358 MODULE_DESCRIPTION("Intel(R) PRO/100 Network Driver");
359 MODULE_LICENSE("GPL");
360
361 E100_PARAM(TxDescriptors, "Number of transmit descriptors");
362 E100_PARAM(RxDescriptors, "Number of receive descriptors");
363 E100_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
364 E100_PARAM(e100_speed_duplex, "Speed and Duplex settings");
365 E100_PARAM(ucode, "Disable or enable microcode loading");
366 E100_PARAM(ber, "Value for the BER correction algorithm");
367 E100_PARAM(flow_control, "Disable or enable Ethernet PAUSE frames processing");
368 E100_PARAM(IntDelay, "Value for CPU saver's interrupt delay");
369 E100_PARAM(BundleSmallFr, "Disable or enable interrupt bundling of small frames");
370 E100_PARAM(BundleMax, "Maximum number for CPU saver's packet bundling");
371 E100_PARAM(IFS, "Disable or enable the adaptive IFS algorithm");
372
373 /**
374 * e100_exec_cmd - issue a command
375 * @bdp: adapter's private data struct
376 * @cmd_low: the command that is to be issued
377 *
378 * This general routine will issue a command to the e100.
379 */
380 static inline void
381 e100_exec_cmd(struct e100_private *bdp, u8 cmd_low)
382 {
383 writeb(cmd_low, &(bdp->scb->scb_cmd_low));
384 readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
385 }
386
387 /**
388 * e100_wait_scb - wait for SCB to clear
389 * @bdp: adapter's private data struct
390 *
391 * This routine checks to see if the e100 has accepted a command.
392 * It does so by checking the command field in the SCB, which will
393 * be zeroed by the e100 upon accepting a command. The loop waits
394 * for up to 1 millisecond for command acceptance.
395 *
396 * Returns:
397 * true if the SCB cleared within 1 millisecond.
398 * false if it didn't clear within 1 millisecond
399 */
400 unsigned char
401 e100_wait_scb(struct e100_private *bdp)
402 {
403 int i;
404
405 /* loop on the scb for a few times */
406 for (i = 0; i < 100; i++) {
407 if (!readb(&bdp->scb->scb_cmd_low))
408 return true;
409 cpu_relax();
410 }
411
412 /* it didn't work. do it the slow way using udelay()s */
413 for (i = 0; i < E100_MAX_SCB_WAIT; i++) {
414 if (!readb(&bdp->scb->scb_cmd_low))
415 return true;
416 cpu_relax();
417 udelay(1);
418 }
419
420 return false;
421 }
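/* A note on e100_wait_scb() above: it is a cheap-then-patient wait, with
 * about 100 busy polls first and then E100_MAX_SCB_WAIT polls separated by
 * udelay(1); that second loop is the "up to 1 millisecond" budget mentioned
 * in the kernel-doc, assuming E100_MAX_SCB_WAIT is on the order of 1000.
 */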
422
423 /**
424 * e100_wait_exec_simple - issue a command
425 * @bdp: adapter's private data struct
426 * @scb_cmd_low: the command that is to be issued
427 *
428 * This general routine will issue a command to the e100 after waiting for
429 * the previous command to finish.
430 *
431 * Returns:
432 * true if the command was issued to the chip successfully
433 * false if the command was not issued to the chip
434 */
435 inline unsigned char
436 e100_wait_exec_simple(struct e100_private *bdp, u8 scb_cmd_low)
437 {
438 if (!e100_wait_scb(bdp)) {
439 printk(KERN_DEBUG "e100: %s: e100_wait_exec_simple: failed\n",
440 bdp->device->name);
441 #ifdef E100_CU_DEBUG
442 printk(KERN_ERR "e100: %s: Last command (%x/%x) "
443 "timeout\n", bdp->device->name,
444 bdp->last_cmd, bdp->last_sub_cmd);
445 printk(KERN_ERR "e100: %s: Current simple command (%x) "
446 "can't be executed\n",
447 bdp->device->name, scb_cmd_low);
448 #endif
449 return false;
450 }
451 e100_exec_cmd(bdp, scb_cmd_low);
452 #ifdef E100_CU_DEBUG
453 bdp->last_cmd = scb_cmd_low;
454 bdp->last_sub_cmd = 0;
455 #endif
456 return true;
457 }
458
459 void
460 e100_exec_cmplx(struct e100_private *bdp, u32 phys_addr, u8 cmd)
461 {
462 writel(phys_addr, &(bdp->scb->scb_gen_ptr));
463 readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
464 e100_exec_cmd(bdp, cmd);
465 }
466
467 unsigned char
468 e100_wait_exec_cmplx(struct e100_private *bdp, u32 phys_addr, u8 cmd, u8 sub_cmd)
469 {
470 if (!e100_wait_scb(bdp)) {
471 #ifdef E100_CU_DEBUG
472 printk(KERN_ERR "e100: %s: Last command (%x/%x) "
473 "timeout\n", bdp->device->name,
474 bdp->last_cmd, bdp->last_sub_cmd);
475 printk(KERN_ERR "e100: %s: Current complex command "
476 "(%x/%x) can't be executed\n",
477 bdp->device->name, cmd, sub_cmd);
478 #endif
479 return false;
480 }
481 e100_exec_cmplx(bdp, phys_addr, cmd);
482 #ifdef E100_CU_DEBUG
483 bdp->last_cmd = cmd;
484 bdp->last_sub_cmd = sub_cmd;
485 #endif
486 return true;
487 }
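/* A typical e100_wait_exec_cmplx() call, as used in e100_open() and
 * e100_hw_init() below, loads the CU base address (0, since the driver
 * uses linear addressing):
 *
 *	if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0))
 *		return -EAGAIN;
 */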
488
489 inline u8
490 e100_wait_cus_idle(struct e100_private *bdp)
491 {
492 int i;
493
494 /* loop on the scb for a few times */
495 for (i = 0; i < 100; i++) {
496 if (((readw(&(bdp->scb->scb_status)) & SCB_CUS_MASK) !=
497 SCB_CUS_ACTIVE)) {
498 return true;
499 }
500 cpu_relax();
501 }
502
503 for (i = 0; i < E100_MAX_CU_IDLE_WAIT; i++) {
504 if (((readw(&(bdp->scb->scb_status)) & SCB_CUS_MASK) !=
505 SCB_CUS_ACTIVE)) {
506 return true;
507 }
508 cpu_relax();
509 udelay(1);
510 }
511
512 return false;
513 }
514
515 /**
516 * e100_disable_clear_intr - disable and clear/ack interrupts
517 * @bdp: adapter's private data struct
518 *
519 * This routine disables interrupts at the hardware, by setting
520 * the M (mask) bit in the adapter's CSR SCB command word.
521 * It also clears and acks any pending interrupts.
522 */
523 static inline void
524 e100_disable_clear_intr(struct e100_private *bdp)
525 {
526 u16 intr_status;
527 /* Disable interrupts on our PCI board by setting the mask bit */
528 writeb(SCB_INT_MASK, &bdp->scb->scb_cmd_hi);
529 intr_status = readw(&bdp->scb->scb_status);
530 /* ack and clear intrs */
531 writew(intr_status, &bdp->scb->scb_status);
532 readw(&bdp->scb->scb_status);
533 }
534
535 /**
536 * e100_set_intr_mask - set the interrupt mask
537 * @bdp: adapter's private data struct
538 *
539 * This routine sets the interrupt state at the hardware, by writing
540 * the adapter's stored mask to the M (mask) bit of the CSR SCB command word.
541 */
542 static inline void
543 e100_set_intr_mask(struct e100_private *bdp)
544 {
545 writeb(bdp->intr_mask, &bdp->scb->scb_cmd_hi);
546 readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
547 }
548
549 static inline void
550 e100_trigger_SWI(struct e100_private *bdp)
551 {
552 /* Trigger interrupt on our PCI board by asserting SWI bit */
553 writeb(SCB_SOFT_INT, &bdp->scb->scb_cmd_hi);
554 readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
555 }
556
557 static int __devinit
558 e100_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
559 {
560 static int first_time = true;
561 struct net_device *dev = NULL;
562 struct e100_private *bdp = NULL;
563 int rc = 0;
564 u16 cal_checksum, read_checksum;
565
566 dev = alloc_etherdev(sizeof (struct e100_private));
567 if (dev == NULL) {
568 printk(KERN_ERR "e100: Not able to alloc etherdev struct\n");
569 rc = -ENODEV;
570 goto out;
571 }
572
573 SET_MODULE_OWNER(dev);
574
575 if (first_time) {
576 first_time = false;
577 printk(KERN_NOTICE "%s - version %s\n",
578 e100_full_driver_name, e100_driver_version);
579 printk(KERN_NOTICE "%s\n", e100_copyright);
580 printk(KERN_NOTICE "\n");
581 }
582
583 bdp = dev->priv;
584 bdp->pdev = pcid;
585 bdp->device = dev;
586
587 pci_set_drvdata(pcid, dev);
588 SET_NETDEV_DEV(dev, &pcid->dev);
589
590 bdp->flags = 0;
591 bdp->ifs_state = 0;
592 bdp->ifs_value = 0;
593 bdp->scb = 0;
594
595 init_timer(&bdp->nontx_timer_id);
596 bdp->nontx_timer_id.data = (unsigned long) bdp;
597 bdp->nontx_timer_id.function = (void *) &e100_non_tx_background;
598 INIT_LIST_HEAD(&(bdp->non_tx_cmd_list));
599 bdp->non_tx_command_state = E100_NON_TX_IDLE;
600
601 init_timer(&bdp->watchdog_timer);
602 bdp->watchdog_timer.data = (unsigned long) dev;
603 bdp->watchdog_timer.function = (void *) &e100_watchdog;
604
605 if ((rc = e100_pci_setup(pcid, bdp)) != 0) {
606 goto err_dev;
607 }
608
609 if ((rc = e100_alloc_space(bdp)) != 0) {
610 goto err_pci;
611 }
612
613 if (((bdp->pdev->device > 0x1030)
614 && (bdp->pdev->device < 0x103F))
615 || ((bdp->pdev->device >= 0x1050)
616 && (bdp->pdev->device <= 0x1057))
617 || ((bdp->pdev->device >= 0x1064)
618 && (bdp->pdev->device <= 0x106B))
619 || (bdp->pdev->device == 0x2449)
620 || (bdp->pdev->device == 0x2459)
621 || (bdp->pdev->device == 0x245D)) {
622 bdp->rev_id = D101MA_REV_ID; /* workaround for ICH3 */
623 bdp->flags |= IS_ICH;
624 }
625
626 if (bdp->rev_id == 0xff)
627 bdp->rev_id = 1;
628
629 if ((u8) bdp->rev_id >= D101A4_REV_ID)
630 bdp->flags |= IS_BACHELOR;
631
632 if ((u8) bdp->rev_id >= D102_REV_ID) {
633 bdp->flags |= USE_IPCB;
634 bdp->rfd_size = 32;
635 } else {
636 bdp->rfd_size = 16;
637 }
638
639 dev->vlan_rx_register = e100_vlan_rx_register;
640 dev->vlan_rx_add_vid = e100_vlan_rx_add_vid;
641 dev->vlan_rx_kill_vid = e100_vlan_rx_kill_vid;
642 dev->irq = pcid->irq;
643 dev->open = &e100_open;
644 dev->hard_start_xmit = &e100_xmit_frame;
645 dev->stop = &e100_close;
646 dev->change_mtu = &e100_change_mtu;
647 dev->get_stats = &e100_get_stats;
648 dev->set_multicast_list = &e100_set_multi;
649 dev->set_mac_address = &e100_set_mac;
650 dev->do_ioctl = &e100_ioctl;
651
652 #ifdef CONFIG_NET_POLL_CONTROLLER
653 dev->poll_controller = e100_netpoll;
654 #endif
655
656 if (bdp->flags & USE_IPCB)
657 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM |
658 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
659
660 if ((rc = register_netdev(dev)) != 0) {
661 goto err_dealloc;
662 }
663
664 e100_check_options(e100nics, bdp);
665
666 if (!e100_init(bdp)) {
667 printk(KERN_ERR "e100: Failed to initialize, instance #%d\n",
668 e100nics);
669 rc = -ENODEV;
670 goto err_unregister_netdev;
671 }
672
673 /* Check if checksum is valid */
674 cal_checksum = e100_eeprom_calculate_chksum(bdp);
675 read_checksum = e100_eeprom_read(bdp, (bdp->eeprom_size - 1));
676 if (cal_checksum != read_checksum) {
677 printk(KERN_ERR "e100: Corrupted EEPROM on instance #%d\n",
678 e100nics);
679 rc = -ENODEV;
680 goto err_unregister_netdev;
681 }
682
683 e100nics++;
684
685 e100_get_speed_duplex_caps(bdp);
686
687 printk(KERN_NOTICE
688 "e100: %s: %s\n",
689 bdp->device->name, "Intel(R) PRO/100 Network Connection");
690 e100_print_brd_conf(bdp);
691
692 bdp->wolsupported = 0;
693 bdp->wolopts = 0;
694 if (bdp->rev_id >= D101A4_REV_ID)
695 bdp->wolsupported = WAKE_PHY | WAKE_MAGIC;
696 if (bdp->rev_id >= D101MA_REV_ID)
697 bdp->wolsupported |= WAKE_UCAST | WAKE_ARP;
698
699 /* Check if WoL is enabled on EEPROM */
700 if (e100_eeprom_read(bdp, EEPROM_ID_WORD) & BIT_5) {
701 /* Magic Packet WoL is enabled on device by default */
702 /* if EEPROM WoL bit is TRUE */
703 bdp->wolopts = WAKE_MAGIC;
704 }
705
706 printk(KERN_NOTICE "\n");
707
708 goto out;
709
710 err_unregister_netdev:
711 unregister_netdev(dev);
712 err_dealloc:
713 e100_dealloc_space(bdp);
714 err_pci:
715 iounmap(bdp->scb);
716 pci_release_regions(pcid);
717 pci_disable_device(pcid);
718 err_dev:
719 pci_set_drvdata(pcid, NULL);
720 free_netdev(dev);
721 out:
722 return rc;
723 }
724
725 /**
726 * e100_clear_structs - free resources
727 * @dev: adapter's net_device struct
728 *
729 * Frees all device-specific structs, unmaps the I/O address, etc.
730 */
731 static void __devexit
732 e100_clear_structs(struct net_device *dev)
733 {
734 struct e100_private *bdp = dev->priv;
735
736 iounmap(bdp->scb);
737 pci_release_regions(bdp->pdev);
738 pci_disable_device(bdp->pdev);
739
740 e100_dealloc_space(bdp);
741 pci_set_drvdata(bdp->pdev, NULL);
742 free_netdev(dev);
743 }
744
745 static void __devexit
746 e100_remove1(struct pci_dev *pcid)
747 {
748 struct net_device *dev;
749 struct e100_private *bdp;
750
751 if (!(dev = (struct net_device *) pci_get_drvdata(pcid)))
752 return;
753
754 bdp = dev->priv;
755
756 unregister_netdev(dev);
757
758 e100_sw_reset(bdp, PORT_SELECTIVE_RESET);
759
760 if (bdp->non_tx_command_state != E100_NON_TX_IDLE) {
761 del_timer_sync(&bdp->nontx_timer_id);
762 e100_free_nontx_list(bdp);
763 bdp->non_tx_command_state = E100_NON_TX_IDLE;
764 }
765
766 e100_clear_structs(dev);
767
768 --e100nics;
769 }
770
771 static struct pci_device_id e100_id_table[] = {
772 {0x8086, 0x1229, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
773 {0x8086, 0x2449, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
774 {0x8086, 0x1059, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
775 {0x8086, 0x1209, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
776 {0x8086, 0x1029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
777 {0x8086, 0x1030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
778 {0x8086, 0x1031, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
779 {0x8086, 0x1032, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
780 {0x8086, 0x1033, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
781 {0x8086, 0x1034, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
782 {0x8086, 0x1038, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
783 {0x8086, 0x1039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
784 {0x8086, 0x103A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
785 {0x8086, 0x103B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
786 {0x8086, 0x103C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
787 {0x8086, 0x103D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
788 {0x8086, 0x103E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
789 {0x8086, 0x1050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
790 {0x8086, 0x1051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
791 {0x8086, 0x1052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
792 {0x8086, 0x1053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
793 {0x8086, 0x1054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
794 {0x8086, 0x1055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
795 {0x8086, 0x1064, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
796 {0x8086, 0x1065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
797 {0x8086, 0x1066, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
798 {0x8086, 0x1067, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
799 {0x8086, 0x1068, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
800 {0x8086, 0x1069, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
801 {0x8086, 0x106A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
802 {0x8086, 0x106B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
803 {0x8086, 0x2459, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
804 {0x8086, 0x245D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
805 {0,} /* This has to be the last entry*/
806 };
807 MODULE_DEVICE_TABLE(pci, e100_id_table);
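/* Vendor ID 0x8086 in the table above is Intel. MODULE_DEVICE_TABLE(pci, ...)
 * exports the table with the module so hotplug tools can match a newly found
 * PCI device against it and autoload this driver.
 */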
808
809 static struct pci_driver e100_driver = {
810 .name = "e100",
811 .id_table = e100_id_table,
812 .probe = e100_found1,
813 .remove = __devexit_p(e100_remove1),
814 #ifdef CONFIG_PM
815 .suspend = e100_suspend,
816 .resume = e100_resume,
817 #endif
818 };
819
820 static int __init
821 e100_init_module(void)
822 {
823 int ret;
824 ret = pci_module_init(&e100_driver);
825
826 if(ret >= 0) {
827 #ifdef CONFIG_PM
828 register_reboot_notifier(&e100_notifier_reboot);
829 #endif
830 }
831
832 return ret;
833 }
834
835 static void __exit
836 e100_cleanup_module(void)
837 {
838 #ifdef CONFIG_PM
839 unregister_reboot_notifier(&e100_notifier_reboot);
840 #endif
841
842 pci_unregister_driver(&e100_driver);
843 }
844
845 module_init(e100_init_module);
846 module_exit(e100_cleanup_module);
847
848 /**
849 * e100_check_options - check command line options
850 * @board: board number
851 * @bdp: adapter's private data struct
852 *
853 * This routine does range checking on command-line options
854 */
855 void __devinit
856 e100_check_options(int board, struct e100_private *bdp)
857 {
858 if (board >= E100_MAX_NIC) {
859 printk(KERN_NOTICE
860 "e100: No configuration available for board #%d\n",
861 board);
862 printk(KERN_NOTICE "e100: Using defaults for all values\n");
863 board = E100_MAX_NIC;
864 }
865
866 e100_set_int_option(&(bdp->params.TxDescriptors), TxDescriptors[board],
867 E100_MIN_TCB, E100_MAX_TCB, E100_DEFAULT_TCB,
868 "TxDescriptor count");
869
870 e100_set_int_option(&(bdp->params.RxDescriptors), RxDescriptors[board],
871 E100_MIN_RFD, E100_MAX_RFD, E100_DEFAULT_RFD,
872 "RxDescriptor count");
873
874 e100_set_int_option(&(bdp->params.e100_speed_duplex),
875 e100_speed_duplex[board], 0, 4,
876 E100_DEFAULT_SPEED_DUPLEX, "speed/duplex mode");
877
878 e100_set_int_option(&(bdp->params.ber), ber[board], 0, ZLOCK_MAX_ERRORS,
879 E100_DEFAULT_BER, "Bit Error Rate count");
880
881 e100_set_bool_option(bdp, XsumRX[board], PRM_XSUMRX, E100_DEFAULT_XSUM,
882 "XsumRX value");
883
884 /* Default ucode value depends on the controller revision */
885 if (bdp->rev_id >= D101MA_REV_ID) {
886 e100_set_bool_option(bdp, ucode[board], PRM_UCODE,
887 E100_DEFAULT_UCODE, "ucode value");
888 } else {
889 e100_set_bool_option(bdp, ucode[board], PRM_UCODE, false,
890 "ucode value");
891 }
892
893 e100_set_bool_option(bdp, flow_control[board], PRM_FC, E100_DEFAULT_FC,
894 "flow control value");
895
896 e100_set_bool_option(bdp, IFS[board], PRM_IFS, E100_DEFAULT_IFS,
897 "IFS value");
898
899 e100_set_bool_option(bdp, BundleSmallFr[board], PRM_BUNDLE_SMALL,
900 E100_DEFAULT_BUNDLE_SMALL_FR,
901 "CPU saver bundle small frames value");
902
903 e100_set_int_option(&(bdp->params.IntDelay), IntDelay[board], 0x0,
904 0xFFFF, E100_DEFAULT_CPUSAVER_INTERRUPT_DELAY,
905 "CPU saver interrupt delay value");
906
907 e100_set_int_option(&(bdp->params.BundleMax), BundleMax[board], 0x1,
908 0xFFFF, E100_DEFAULT_CPUSAVER_BUNDLE_MAX,
909 "CPU saver bundle max value");
910
911 }
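/* A hypothetical usage example for the options checked above:
 *
 *	modprobe e100 TxDescriptors=32,64
 *
 * gives board 0 a count of 32 and board 1 a count of 64; boards without an
 * entry keep -1 and fall back to E100_DEFAULT_TCB, and out-of-range values
 * are likewise replaced by the default.
 */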
912
913 /**
914 * e100_set_int_option - check and set an integer option
915 * @option: a pointer to the relevant option field
916 * @val: the value specified
917 * @min: the minimum valid value
918 * @max: the maximum valid value
919 * @default_val: the default value
920 * @name: the name of the option
921 *
922 * This routine does range checking on a command-line option.
923 * If the option's value is '-1' use the specified default.
924 * Otherwise, if the value is invalid, change it to the default.
925 */
926 void __devinit
927 e100_set_int_option(int *option, int val, int min, int max, int default_val,
928 char *name)
929 {
930 if (val == -1) { /* no value specified. use default */
931 *option = default_val;
932
933 } else if ((val < min) || (val > max)) {
934 printk(KERN_NOTICE
935 "e100: Invalid %s specified (%i). "
936 "Valid range is %i-%i\n",
937 name, val, min, max);
938 printk(KERN_NOTICE "e100: Using default %s of %i\n", name,
939 default_val);
940 *option = default_val;
941 } else {
942 printk(KERN_INFO "e100: Using specified %s of %i\n", name, val);
943 *option = val;
944 }
945 }
946
947 /**
948 * e100_set_bool_option - check and set a boolean option
949 * @bdp: adapter's private data struct
950 * @val: the value specified
951 * @mask: the mask for the relevant option
952 * @default_val: the default value
953 * @name: the name of the option
954 *
955 * This routine checks a boolean command-line option.
956 * If the option's value is '-1' use the specified default.
957 * Otherwise, if the value is invalid (not 0 or 1),
958 * change it to the default.
959 */
960 void __devinit
961 e100_set_bool_option(struct e100_private *bdp, int val, u32 mask,
962 int default_val, char *name)
963 {
964 if (val == -1) {
965 if (default_val)
966 bdp->params.b_params |= mask;
967
968 } else if ((val != true) && (val != false)) {
969 printk(KERN_NOTICE
970 "e100: Invalid %s specified (%i). "
971 "Valid values are %i/%i\n",
972 name, val, false, true);
973 printk(KERN_NOTICE "e100: Using default %s of %i\n", name,
974 default_val);
975
976 if (default_val)
977 bdp->params.b_params |= mask;
978 } else {
979 printk(KERN_INFO "e100: Using specified %s of %i\n", name, val);
980 if (val)
981 bdp->params.b_params |= mask;
982 }
983 }
984
985 int
986 e100_open(struct net_device *dev)
987 {
988 struct e100_private *bdp;
989 int rc = 0;
990
991 bdp = dev->priv;
992
993 /* setup the tcb pool */
994 if (!e100_alloc_tcb_pool(bdp)) {
995 rc = -ENOMEM;
996 goto err_exit;
997 }
998 bdp->last_tcb = NULL;
999
1000 bdp->tcb_pool.head = 0;
1001 bdp->tcb_pool.tail = 1;
1002
1003 e100_setup_tcb_pool((tcb_t *) bdp->tcb_pool.data,
1004 bdp->params.TxDescriptors, bdp);
1005
1006 if (!e100_alloc_rfd_pool(bdp)) {
1007 rc = -ENOMEM;
1008 goto err_exit;
1009 }
1010
1011 if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0)) {
1012 rc = -EAGAIN;
1013 goto err_exit;
1014 }
1015
1016 if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0)) {
1017 rc = -EAGAIN;
1018 goto err_exit;
1019 }
1020
1021 mod_timer(&(bdp->watchdog_timer), jiffies + (2 * HZ));
1022
1023 if (dev->flags & IFF_UP)
1024 /* Otherwise process may sleep forever */
1025 netif_wake_queue(dev);
1026 else
1027 netif_start_queue(dev);
1028
1029 e100_start_ru(bdp);
1030 if ((rc = request_irq(dev->irq, &e100intr, SA_SHIRQ,
1031 dev->name, dev)) != 0) {
1032 del_timer_sync(&bdp->watchdog_timer);
1033 goto err_exit;
1034 }
1035 bdp->intr_mask = 0;
1036 e100_set_intr_mask(bdp);
1037
1038 e100_force_config(bdp);
1039
1040 goto exit;
1041
1042 err_exit:
1043 e100_clear_pools(bdp);
1044 exit:
1045 return rc;
1046 }
1047
1048 int
1049 e100_close(struct net_device *dev)
1050 {
1051 struct e100_private *bdp = dev->priv;
1052
1053 e100_disable_clear_intr(bdp);
1054 free_irq(dev->irq, dev);
1055 bdp->intr_mask = SCB_INT_MASK;
1056 e100_isolate_driver(bdp);
1057
1058 netif_carrier_off(bdp->device);
1059 bdp->cur_line_speed = 0;
1060 bdp->cur_dplx_mode = 0;
1061 e100_clear_pools(bdp);
1062
1063 return 0;
1064 }
1065
1066 static int
1067 e100_change_mtu(struct net_device *dev, int new_mtu)
1068 {
1069 if ((new_mtu < 68) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
1070 return -EINVAL;
1071
1072 dev->mtu = new_mtu;
1073 return 0;
1074 }
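/* On the bounds above: 68 is the classic minimum IPv4 MTU, and the upper
 * bound is ETH_DATA_LEN (1500) plus VLAN_SIZE (assumed here to be the
 * 4-byte 802.1Q tag), for a maximum of 1504.
 */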
1075
1076 /**
1077 * e100_prepare_xmit_buff - prepare a buffer for transmission
1078 * @bdp: adapter's private data struct
1079 * @skb: skb to send
1080 *
1081 * This routine prepares a buffer for transmission. It checks
1082 * the message length for the appropriate size. It picks up a
1083 * free tcb from the TCB pool and sets up the corresponding
1084 * TBD's. If the number of fragments is more than the number
1085 * of TBDs per TCB, it copies all the fragments into a coalesce buffer.
1086 * It returns a pointer to the prepared TCB.
1087 */
1088 static inline tcb_t *
1089 e100_prepare_xmit_buff(struct e100_private *bdp, struct sk_buff *skb)
1090 {
1091 tcb_t *tcb, *prev_tcb;
1092
1093 tcb = bdp->tcb_pool.data;
1094 tcb += TCB_TO_USE(bdp->tcb_pool);
1095
1096 if (bdp->flags & USE_IPCB) {
1097 tcb->tcbu.ipcb.ip_activation_high = IPCB_IP_ACTIVATION_DEFAULT;
1098 tcb->tcbu.ipcb.ip_schedule &= ~IPCB_TCP_PACKET;
1099 tcb->tcbu.ipcb.ip_schedule &= ~IPCB_TCPUDP_CHECKSUM_ENABLE;
1100 }
1101
1102 if(bdp->vlgrp && vlan_tx_tag_present(skb)) {
1103 (tcb->tcbu).ipcb.ip_activation_high |= IPCB_INSERTVLAN_ENABLE;
1104 (tcb->tcbu).ipcb.vlan = cpu_to_be16(vlan_tx_tag_get(skb));
1105 }
1106
1107 tcb->tcb_hdr.cb_status = 0;
1108 tcb->tcb_thrshld = bdp->tx_thld;
1109 tcb->tcb_hdr.cb_cmd |= __constant_cpu_to_le16(CB_S_BIT);
1110
1111 /* Set I (Interrupt) bit on every (TX_FRAME_CNT)th packet */
1112 if (!(++bdp->tx_count % TX_FRAME_CNT))
1113 tcb->tcb_hdr.cb_cmd |= __constant_cpu_to_le16(CB_I_BIT);
1114 else
1115 /* Clear I bit on other packets */
1116 tcb->tcb_hdr.cb_cmd &= ~__constant_cpu_to_le16(CB_I_BIT);
1117
1118 tcb->tcb_skb = skb;
1119
1120 if (skb->ip_summed == CHECKSUM_HW) {
1121 const struct iphdr *ip = skb->nh.iph;
1122
1123 if ((ip->protocol == IPPROTO_TCP) ||
1124 (ip->protocol == IPPROTO_UDP)) {
1125
1126 tcb->tcbu.ipcb.ip_activation_high |=
1127 IPCB_HARDWAREPARSING_ENABLE;
1128 tcb->tcbu.ipcb.ip_schedule |=
1129 IPCB_TCPUDP_CHECKSUM_ENABLE;
1130
1131 if (ip->protocol == IPPROTO_TCP)
1132 tcb->tcbu.ipcb.ip_schedule |= IPCB_TCP_PACKET;
1133 }
1134 }
1135
1136 if (!skb_shinfo(skb)->nr_frags) {
1137 (tcb->tbd_ptr)->tbd_buf_addr =
1138 cpu_to_le32(pci_map_single(bdp->pdev, skb->data,
1139 skb->len, PCI_DMA_TODEVICE));
1140 (tcb->tbd_ptr)->tbd_buf_cnt = cpu_to_le16(skb->len);
1141 tcb->tcb_tbd_num = 1;
1142 tcb->tcb_tbd_ptr = tcb->tcb_tbd_dflt_ptr;
1143 } else {
1144 int i;
1145 void *addr;
1146 tbd_t *tbd_arr_ptr = &(tcb->tbd_ptr[1]);
1147 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1148
1149 (tcb->tbd_ptr)->tbd_buf_addr =
1150 cpu_to_le32(pci_map_single(bdp->pdev, skb->data,
1151 skb_headlen(skb),
1152 PCI_DMA_TODEVICE));
1153 (tcb->tbd_ptr)->tbd_buf_cnt =
1154 cpu_to_le16(skb_headlen(skb));
1155
1156 for (i = 0; i < skb_shinfo(skb)->nr_frags;
1157 i++, tbd_arr_ptr++, frag++) {
1158
1159 addr = ((void *) page_address(frag->page) +
1160 frag->page_offset);
1161
1162 tbd_arr_ptr->tbd_buf_addr =
1163 cpu_to_le32(pci_map_single(bdp->pdev,
1164 addr, frag->size,
1165 PCI_DMA_TODEVICE));
1166 tbd_arr_ptr->tbd_buf_cnt = cpu_to_le16(frag->size);
1167 }
1168 tcb->tcb_tbd_num = skb_shinfo(skb)->nr_frags + 1;
1169 tcb->tcb_tbd_ptr = tcb->tcb_tbd_expand_ptr;
1170 }
1171
1172 /* clear the S-BIT on the previous tcb */
1173 prev_tcb = bdp->tcb_pool.data;
1174 prev_tcb += PREV_TCB_USED(bdp->tcb_pool);
1175 prev_tcb->tcb_hdr.cb_cmd &= __constant_cpu_to_le16((u16) ~CB_S_BIT);
1176
1177 bdp->tcb_pool.tail = NEXT_TCB_TOUSE(bdp->tcb_pool.tail);
1178
1179 wmb();
1180
1181 e100_start_cu(bdp, tcb);
1182
1183 return tcb;
1184 }
1185
1186 static int
1187 e100_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1188 {
1189 int rc = 0;
1190 int notify_stop = false;
1191 struct e100_private *bdp = dev->priv;
1192
1193 if (!spin_trylock(&bdp->bd_non_tx_lock)) {
1194 notify_stop = true;
1195 rc = 1;
1196 goto exit2;
1197 }
1198
1199 /* tcb list may be empty temporarily during releasing resources */
1200 if (!TCBS_AVAIL(bdp->tcb_pool) || (bdp->tcb_phys == 0) ||
1201 (bdp->non_tx_command_state != E100_NON_TX_IDLE)) {
1202 notify_stop = true;
1203 rc = 1;
1204 goto exit1;
1205 }
1206
1207 bdp->drv_stats.net_stats.tx_bytes += skb->len;
1208
1209 e100_prepare_xmit_buff(bdp, skb);
1210
1211 dev->trans_start = jiffies;
1212
1213 exit1:
1214 spin_unlock(&bdp->bd_non_tx_lock);
1215 exit2:
1216 if (notify_stop) {
1217 netif_stop_queue(dev);
1218 }
1219
1220 return rc;
1221 }
1222
1223 /**
1224 * e100_get_stats - get driver statistics
1225 * @dev: adapter's net_device struct
1226 *
1227 * This routine is called when the OS wants the adapter's stats returned.
1228 * It returns the address of the net_device_stats structure for the device.
1229 * If the statistics are currently being updated, then they might be incorrect
1230 * for a short while. However, since this cannot actually cause damage, no
1231 * locking is used.
1232 */
1233 struct net_device_stats *
1234 e100_get_stats(struct net_device *dev)
1235 {
1236 struct e100_private *bdp = dev->priv;
1237
1238 bdp->drv_stats.net_stats.tx_errors =
1239 bdp->drv_stats.net_stats.tx_carrier_errors +
1240 bdp->drv_stats.net_stats.tx_aborted_errors;
1241
1242 bdp->drv_stats.net_stats.rx_errors =
1243 bdp->drv_stats.net_stats.rx_crc_errors +
1244 bdp->drv_stats.net_stats.rx_frame_errors +
1245 bdp->drv_stats.net_stats.rx_length_errors +
1246 bdp->drv_stats.rcv_cdt_frames;
1247
1248 return &(bdp->drv_stats.net_stats);
1249 }
1250
1251 /**
1252 * e100_set_mac - set the MAC address
1253 * @dev: adapter's net_device struct
1254 * @addr: the new address
1255 *
1256 * This routine sets the Ethernet address of the board.
1257 * Returns:
1258 * 0 - if successful
1259 * -1 - otherwise
1260 */
1261 static int
1262 e100_set_mac(struct net_device *dev, void *addr)
1263 {
1264 struct e100_private *bdp;
1265 int rc = -1;
1266 struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
1267
1268 if (!is_valid_ether_addr(p_sockaddr->sa_data))
1269 return -EADDRNOTAVAIL;
1270 bdp = dev->priv;
1271
1272 if (e100_setup_iaaddr(bdp, (u8 *) (p_sockaddr->sa_data))) {
1273 memcpy(&(dev->dev_addr[0]), p_sockaddr->sa_data, ETH_ALEN);
1274 rc = 0;
1275 }
1276
1277 return rc;
1278 }
1279
1280 static void
1281 e100_set_multi_exec(struct net_device *dev)
1282 {
1283 struct e100_private *bdp = dev->priv;
1284 mltcst_cb_t *mcast_buff;
1285 cb_header_t *cb_hdr;
1286 struct dev_mc_list *mc_list;
1287 unsigned int i;
1288 nxmit_cb_entry_t *cmd = e100_alloc_non_tx_cmd(bdp);
1289
1290 if (cmd != NULL) {
1291 mcast_buff = &((cmd->non_tx_cmd)->ntcb.multicast);
1292 cb_hdr = &((cmd->non_tx_cmd)->ntcb.multicast.mc_cbhdr);
1293 } else {
1294 return;
1295 }
1296
1297 /* initialize the multi cast command */
1298 cb_hdr->cb_cmd = __constant_cpu_to_le16(CB_MULTICAST);
1299
1300 /* now fill in the rest of the multicast command; the count is in bytes, ETH_ALEN (6) per address */
1301 *(u16 *) (&(mcast_buff->mc_count)) = cpu_to_le16(dev->mc_count * 6);
1302 for (i = 0, mc_list = dev->mc_list;
1303 (i < dev->mc_count) && (i < MAX_MULTICAST_ADDRS);
1304 i++, mc_list = mc_list->next) {
1305 /* copy into the command */
1306 memcpy(&(mcast_buff->mc_addr[i * ETH_ALEN]),
1307 (u8 *) &(mc_list->dmi_addr), ETH_ALEN);
1308 }
1309
1310 if (!e100_exec_non_cu_cmd(bdp, cmd)) {
1311 printk(KERN_WARNING "e100: %s: Multicast setup failed\n",
1312 dev->name);
1313 }
1314 }
1315
1316 /**
1317 * e100_set_multi - set multicast status
1318 * @dev: adapter's net_device struct
1319 *
1320 * This routine is called to add or remove multicast addresses, and/or to
1321 * change the adapter's promiscuous state.
1322 */
1323 static void
1324 e100_set_multi(struct net_device *dev)
1325 {
1326 struct e100_private *bdp = dev->priv;
1327 unsigned char promisc_enbl;
1328 unsigned char mulcast_enbl;
1329
1330 promisc_enbl = ((dev->flags & IFF_PROMISC) == IFF_PROMISC);
1331 mulcast_enbl = ((dev->flags & IFF_ALLMULTI) ||
1332 (dev->mc_count > MAX_MULTICAST_ADDRS));
1333
1334 e100_config_promisc(bdp, promisc_enbl);
1335 e100_config_mulcast_enbl(bdp, mulcast_enbl);
1336
1337 /* reconfigure the chip if something has changed in its config space */
1338 e100_config(bdp);
1339
1340 if (promisc_enbl || mulcast_enbl) {
1341 return; /* no need for Multicast Cmd */
1342 }
1343
1344 /* get the multicast CB */
1345 e100_set_multi_exec(dev);
1346 }
1347
1348 static int
1349 e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1350 {
1351
1352 switch (cmd) {
1353
1354 case SIOCETHTOOL:
1355 return e100_do_ethtool_ioctl(dev, ifr);
1356 break;
1357
1358 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1359 case SIOCGMIIREG: /* Read MII PHY register. */
1360 case SIOCSMIIREG: /* Write to MII PHY register. */
1361 return e100_mii_ioctl(dev, ifr, cmd);
1362 break;
1363
1364 default:
1365 return -EOPNOTSUPP;
1366 }
1367 return 0;
1368
1369 }
1370
1371 /**
1372 * e100_init - initialize the adapter
1373 * @bdp: adapter's private data struct
1374 *
1375 * This routine is called when this driver is loaded. This is the initialization
1376 * routine which allocates memory, configures the adapter and determines the
1377 * system resources.
1378 *
1379 * Returns:
1380 * true: if successful
1381 * false: otherwise
1382 */
1383 static unsigned char __devinit
1384 e100_init(struct e100_private *bdp)
1385 {
1386 u32 st_timeout = 0;
1387 u32 st_result = 0;
1388 e100_sw_init(bdp);
1389
1390 if (!e100_selftest(bdp, &st_timeout, &st_result)) {
1391 if (st_timeout) {
1392 printk(KERN_ERR "e100: selftest timeout\n");
1393 } else {
1394 printk(KERN_ERR "e100: selftest failed. Results: %x\n",
1395 st_result);
1396 }
1397 return false;
1398 }
1399 else
1400 printk(KERN_DEBUG "e100: selftest OK.\n");
1401
1402 /* read the MAC address from the eeprom */
1403 e100_rd_eaddr(bdp);
1404 if (!is_valid_ether_addr(bdp->device->dev_addr)) {
1405 printk(KERN_ERR "e100: Invalid Ethernet address\n");
1406 return false;
1407 }
1408 /* read NIC's part number */
1409 e100_rd_pwa_no(bdp);
1410
1411 if (!e100_hw_init(bdp))
1412 return false;
1413 /* Interrupts are enabled after device reset */
1414 e100_disable_clear_intr(bdp);
1415
1416 return true;
1417 }
1418
1419 /**
1420 * e100_sw_init - initialize software structs
1421 * @bdp: adapter's private data struct
1422 *
1423 * This routine initializes all software structures. Sets up the
1424 * circular structures for the RFD's & TCB's. Allocates the per board
1425 * structure for storing adapter information. The CSR is also memory
1426 * mapped in this routine.
1427 *
1428 * Returns :
1429 * true: if S/W was successfully initialized
1430 * false: otherwise
1431 */
1432 static unsigned char __devinit
1433 e100_sw_init(struct e100_private *bdp)
1434 {
1435 bdp->next_cu_cmd = START_WAIT; // init the next cu state
1436
1437 /*
1438 * Set the value for # of good xmits per underrun. the value assigned
1439 * here is an intelligent suggested default. Nothing magical about it.
1440 */
1441 bdp->tx_per_underrun = DEFAULT_TX_PER_UNDERRUN;
1442
1443 /* get the default transmit threshold value */
1444 bdp->tx_thld = TX_THRSHLD;
1445
1446 /* get the EEPROM size */
1447 bdp->eeprom_size = e100_eeprom_size(bdp);
1448
1449 /* Initialize our spinlocks */
1450 spin_lock_init(&(bdp->bd_lock));
1451 spin_lock_init(&(bdp->bd_non_tx_lock));
1452 spin_lock_init(&(bdp->config_lock));
1453 spin_lock_init(&(bdp->mdi_access_lock));
1454 /* Initialize configuration data */
1455 e100_config_init(bdp);
1456
1457 return 1;
1458 }
1459
1460 static void
1461 e100_tco_workaround(struct e100_private *bdp)
1462 {
1463 int i;
1464
1465 /* Do software reset */
1466 e100_sw_reset(bdp, PORT_SOFTWARE_RESET);
1467
1468 /* Do a dummy LOAD CU BASE command. */
1469 /* This gets us out of pre-driver to post-driver. */
1470 e100_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE);
1471
1472 /* Wait 20 msec for reset to take effect */
1473 set_current_state(TASK_UNINTERRUPTIBLE);
1474 schedule_timeout(HZ / 50 + 1);
1475
1476 /* disable interrupts since they are enabled */
1477 /* after device reset */
1478 e100_disable_clear_intr(bdp);
1479
1480 /* Wait for command to be cleared up to 1 sec */
1481 for (i=0; i<100; i++) {
1482 if (!readb(&bdp->scb->scb_cmd_low))
1483 break;
1484 set_current_state(TASK_UNINTERRUPTIBLE);
1485 schedule_timeout(HZ / 100 + 1);
1486 }
1487
1488 /* Wait for TCO request bit in PMDR register to be clear */
1489 for (i=0; i<50; i++) {
1490 if (!(readb(&bdp->scb->scb_ext.d101m_scb.scb_pmdr) & BIT_1))
1491 break;
1492 set_current_state(TASK_UNINTERRUPTIBLE);
1493 schedule_timeout(HZ / 100 + 1);
1494 }
1495 }
1496
1497 /**
1498 * e100_hw_init - initialize the hardware
1499 * @bdp: adapter's private data struct
1500 *
1501 * This routine performs a reset on the adapter, and configures the adapter.
1502 * This includes configuring the 82557 LAN controller, validating and setting
1503 * the node address, detecting and configuring the Phy chip on the adapter,
1504 * and initializing all of the on chip counters.
1505 *
1506 * Returns:
1507 * true - If the adapter was initialized
1508 * false - If the adapter failed initialization
1509 */
1510 unsigned char
1511 e100_hw_init(struct e100_private *bdp)
1512 {
1513 if (!e100_phy_init(bdp))
1514 goto err;
1515
1516 e100_sw_reset(bdp, PORT_SELECTIVE_RESET);
1517
1518 /* Only 82559 or above needs TCO workaround */
1519 if (bdp->rev_id >= D101MA_REV_ID)
1520 e100_tco_workaround(bdp);
1521
1522 /* Load the CU BASE (set to 0, because we use linear mode) */
1523 if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0))
1524 goto err;
1525
1526 if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0))
1527 goto err;
1528
1529 /* Load interrupt microcode */
1530 if (e100_load_microcode(bdp)) {
1531 bdp->flags |= DF_UCODE_LOADED;
1532 }
1533
1534 if ((u8) bdp->rev_id < D101A4_REV_ID)
1535 e100_config_init_82557(bdp);
1536
1537 if (!e100_config(bdp))
1538 goto err;
1539
1540 if (!e100_setup_iaaddr(bdp, bdp->device->dev_addr))
1541 goto err;
1542
1543 /* Clear the internal counters */
1544 if (!e100_clr_cntrs(bdp))
1545 goto err;
1546
1547 /* Change for 82558 enhancement */
1548 /* If 82558/9 and if the user has enabled flow control, set up the
1549 * Flow Control Reg. in the CSR */
1550 if ((bdp->flags & IS_BACHELOR)
1551 && (bdp->params.b_params & PRM_FC)) {
1552 writeb(DFLT_FC_THLD, &bdp->scb->scb_ext.d101_scb.scb_fc_thld);
1553 writeb(DFLT_FC_CMD,
1554 &bdp->scb->scb_ext.d101_scb.scb_fc_xon_xoff);
1555 }
1556
1557 return true;
1558 err:
1559 printk(KERN_ERR "e100: hw init failed\n");
1560 return false;
1561 }
1562
1563 /**
1564 * e100_setup_tcb_pool - setup TCB circular list
1565 * @head: Pointer to head of the allocated TCBs
1566 * @qlen: Number of elements in the queue
1567 * @bdp: adapter's private data struct
1568 *
1569 * This routine arranges the contiguously allocated TCB's in a circular list.
1570 * Also does the one time initialization of the TCBs.
1571 */
1572 static void
1573 e100_setup_tcb_pool(tcb_t *head, unsigned int qlen, struct e100_private *bdp)
1574 {
1575 int ele_no;
1576 tcb_t *pcurr_tcb; /* point to current tcb */
1577 u32 next_phys; /* the next phys addr */
1578 u16 txcommand = CB_S_BIT | CB_TX_SF_BIT;
1579
1580 bdp->tx_count = 0;
1581 if (bdp->flags & USE_IPCB) {
1582 txcommand |= CB_IPCB_TRANSMIT | CB_CID_DEFAULT;
1583 } else if (bdp->flags & IS_BACHELOR) {
1584 txcommand |= CB_TRANSMIT | CB_CID_DEFAULT;
1585 } else {
1586 txcommand |= CB_TRANSMIT;
1587 }
1588
1589 for (ele_no = 0, next_phys = bdp->tcb_phys, pcurr_tcb = head;
1590 ele_no < qlen; ele_no++, pcurr_tcb++) {
1591
1592 /* set the phys addr for this TCB, next_phys has not incr. yet */
1593 pcurr_tcb->tcb_phys = next_phys;
1594 next_phys += sizeof (tcb_t);
1595
1596 /* set the link to next tcb */
1597 if (ele_no == (qlen - 1))
1598 pcurr_tcb->tcb_hdr.cb_lnk_ptr =
1599 cpu_to_le32(bdp->tcb_phys);
1600 else
1601 pcurr_tcb->tcb_hdr.cb_lnk_ptr = cpu_to_le32(next_phys);
1602
1603 pcurr_tcb->tcb_hdr.cb_status = 0;
1604 pcurr_tcb->tcb_hdr.cb_cmd = cpu_to_le16(txcommand);
1605 pcurr_tcb->tcb_cnt = 0;
1606 pcurr_tcb->tcb_thrshld = bdp->tx_thld;
1607 if (ele_no < 2) {
1608 pcurr_tcb->tcb_hdr.cb_status =
1609 cpu_to_le16(CB_STATUS_COMPLETE);
1610 }
1611 pcurr_tcb->tcb_tbd_num = 1;
1612
1613 if (bdp->flags & IS_BACHELOR) {
1614 pcurr_tcb->tcb_tbd_ptr =
1615 __constant_cpu_to_le32(0xFFFFFFFF);
1616 } else {
1617 pcurr_tcb->tcb_tbd_ptr =
1618 cpu_to_le32(pcurr_tcb->tcb_phys + 0x10);
1619 }
1620
1621 if (bdp->flags & IS_BACHELOR) {
1622 pcurr_tcb->tcb_tbd_expand_ptr =
1623 cpu_to_le32(pcurr_tcb->tcb_phys + 0x20);
1624 } else {
1625 pcurr_tcb->tcb_tbd_expand_ptr =
1626 cpu_to_le32(pcurr_tcb->tcb_phys + 0x10);
1627 }
1628 pcurr_tcb->tcb_tbd_dflt_ptr = pcurr_tcb->tcb_tbd_ptr;
1629
1630 if (bdp->flags & USE_IPCB) {
1631 pcurr_tcb->tbd_ptr = &(pcurr_tcb->tcbu.tbd_array[1]);
1632 pcurr_tcb->tcbu.ipcb.ip_activation_high =
1633 IPCB_IP_ACTIVATION_DEFAULT;
1634 pcurr_tcb->tcbu.ipcb.vlan = 0;
1635 } else {
1636 pcurr_tcb->tbd_ptr = &(pcurr_tcb->tcbu.tbd_array[0]);
1637 }
1638
1639 pcurr_tcb->tcb_skb = NULL;
1640 }
1641
1642 wmb();
1643 }
1644
1645 /***************************************************************************/
1646 /***************************************************************************/
1647 /* Memory Management Routines */
1648 /***************************************************************************/
1649
1650 /**
1651 * e100_alloc_space - allocate private driver data
1652 * @bdp: adapter's private data struct
1653 *
1654 * This routine allocates memory for the driver. Memory allocated is for the
1655 * selftest and statistics structures.
1656 *
1657 * Returns:
1658 * 0: if the operation was successful
1659 * %-ENOMEM: if memory allocation failed
1660 */
1661 unsigned char __devinit
1662 e100_alloc_space(struct e100_private *bdp)
1663 {
1664 unsigned long off;
1665
1666 /* allocate all the dma-able structures in one call:
1667 * selftest results, adapter stats, and non-tx cb commands */
1668 if (!(bdp->dma_able =
1669 pci_alloc_consistent(bdp->pdev, sizeof (bd_dma_able_t),
1670 &(bdp->dma_able_phys)))) {
1671 goto err;
1672 }
1673
1674 /* now assign the various pointers into the struct we've just allocated */
1675 off = offsetof(bd_dma_able_t, selftest);
1676
1677 bdp->selftest = (self_test_t *) (bdp->dma_able + off);
1678 bdp->selftest_phys = bdp->dma_able_phys + off;
1679
1680 off = offsetof(bd_dma_able_t, stats_counters);
1681
1682 bdp->stats_counters = (max_counters_t *) (bdp->dma_able + off);
1683 bdp->stat_cnt_phys = bdp->dma_able_phys + off;
1684
1685 return 0;
1686
1687 err:
1688 printk(KERN_ERR
1689 "e100: Failed to allocate memory\n");
1690 return -ENOMEM;
1691 }
1692
1693 /**
1694 * e100_alloc_tcb_pool - allocate TCB circular list
1695 * @bdp: adapter's private data struct
1696 *
1697 * This routine allocates memory for the circular list of transmit descriptors.
1698 *
1699 * Returns:
1700 * 0: if allocation has failed.
1701 * 1: Otherwise.
1702 */
1703 int
1704 e100_alloc_tcb_pool(struct e100_private *bdp)
1705 {
1706 int stcb = sizeof (tcb_t) * bdp->params.TxDescriptors;
1707
1708 /* allocate space for the TCBs */
1709 if (!(bdp->tcb_pool.data =
1710 pci_alloc_consistent(bdp->pdev, stcb, &bdp->tcb_phys)))
1711 return 0;
1712
1713 memset(bdp->tcb_pool.data, 0x00, stcb);
1714
1715 return 1;
1716 }
1717
1718 /**
1719 * e100_tx_skb_free - free TX skbs resources
1720 * @bdp: adapter's private data struct
1721 * @tcb: associated tcb of the freed skb
1722 *
1723 * This routine frees resources of TX skbs.
1724 */
1725 static inline void
1726 e100_tx_skb_free(struct e100_private *bdp, tcb_t *tcb)
1727 {
1728 if (tcb->tcb_skb) {
1729 int i;
1730 tbd_t *tbd_arr = tcb->tbd_ptr;
1731 int frags = skb_shinfo(tcb->tcb_skb)->nr_frags;
1732
1733 for (i = 0; i <= frags; i++, tbd_arr++) {
1734 pci_unmap_single(bdp->pdev,
1735 le32_to_cpu(tbd_arr->tbd_buf_addr),
1736 le16_to_cpu(tbd_arr->tbd_buf_cnt),
1737 PCI_DMA_TODEVICE);
1738 }
1739 dev_kfree_skb_irq(tcb->tcb_skb);
1740 tcb->tcb_skb = NULL;
1741 }
1742 }
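/* For reference, a hedged sketch of the inverse step done at transmit time
 * (the actual prepare/xmit code lives elsewhere in this file): each part of
 * the skb is DMA-mapped into a consecutive TBD, which is why the loop above
 * can walk tbd_ptr for nr_frags + 1 entries:
 *
 *	tbd_arr->tbd_buf_addr = cpu_to_le32(pci_map_single(bdp->pdev,
 *				skb->data, skb_headlen(skb),
 *				PCI_DMA_TODEVICE));
 *	tbd_arr->tbd_buf_cnt = cpu_to_le16(skb_headlen(skb));
 */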
1743
1744 void
1745 e100_free_tcb_pool(struct e100_private *bdp)
1746 {
1747 tcb_t *tcb;
1748 int i;
1749 /* Return tx skbs */
1750 for (i = 0; i < bdp->params.TxDescriptors; i++) {
1751 tcb = bdp->tcb_pool.data;
1752 tcb += bdp->tcb_pool.head;
1753 e100_tx_skb_free(bdp, tcb);
1754 if (NEXT_TCB_TOUSE(bdp->tcb_pool.head) == bdp->tcb_pool.tail)
1755 break;
1756 bdp->tcb_pool.head = NEXT_TCB_TOUSE(bdp->tcb_pool.head);
1757 }
1758 pci_free_consistent(bdp->pdev,
1759 sizeof (tcb_t) * bdp->params.TxDescriptors,
1760 bdp->tcb_pool.data, bdp->tcb_phys);
1761 bdp->tcb_pool.head = 0;
1762 bdp->tcb_pool.tail = 1;
1763 bdp->tcb_phys = 0;
1764 }
1765
1766 static void
1767 e100_dealloc_space(struct e100_private *bdp)
1768 {
1769 if (bdp->dma_able) {
1770 pci_free_consistent(bdp->pdev, sizeof (bd_dma_able_t),
1771 bdp->dma_able, bdp->dma_able_phys);
1772 }
1773
1774 bdp->selftest_phys = 0;
1775 bdp->stat_cnt_phys = 0;
1776 bdp->dma_able_phys = 0;
1777 bdp->dma_able = 0;
1778 }
1779
1780 static void
1781 e100_free_rfd_pool(struct e100_private *bdp)
1782 {
1783 struct rx_list_elem *rx_struct;
1784
1785 while (!list_empty(&(bdp->active_rx_list))) {
1786
1787 rx_struct = list_entry(bdp->active_rx_list.next,
1788 struct rx_list_elem, list_elem);
1789 list_del(&(rx_struct->list_elem));
1790 pci_unmap_single(bdp->pdev, rx_struct->dma_addr,
1791 sizeof (rfd_t), PCI_DMA_TODEVICE);
1792 dev_kfree_skb(rx_struct->skb);
1793 kfree(rx_struct);
1794 }
1795
1796 while (!list_empty(&(bdp->rx_struct_pool))) {
1797 rx_struct = list_entry(bdp->rx_struct_pool.next,
1798 struct rx_list_elem, list_elem);
1799 list_del(&(rx_struct->list_elem));
1800 kfree(rx_struct);
1801 }
1802 }
1803
1804 /**
1805 * e100_alloc_rfd_pool - allocate RFDs
1806  * @bdp: adapter's private data struct
1807 *
1808  * Allocates the initial pool of skbs, each holding both an RFD and data,
1809  * and returns nonzero if the active rx list is non-empty afterwards.
1810 */
1811 static int
1812 e100_alloc_rfd_pool(struct e100_private *bdp)
1813 {
1814 struct rx_list_elem *rx_struct;
1815 int i;
1816
1817 INIT_LIST_HEAD(&(bdp->active_rx_list));
1818 INIT_LIST_HEAD(&(bdp->rx_struct_pool));
1819 bdp->skb_req = bdp->params.RxDescriptors;
1820 for (i = 0; i < bdp->skb_req; i++) {
1821 		rx_struct = kmalloc(sizeof (struct rx_list_elem), GFP_ATOMIC);
		if (!rx_struct)	/* bail out if the atomic allocation fails */
			break;
1822 		list_add(&(rx_struct->list_elem), &(bdp->rx_struct_pool));
1823 }
1824 e100_alloc_skbs(bdp);
1825 return !list_empty(&(bdp->active_rx_list));
1826
1827 }
1828
1829 void
1830 e100_clear_pools(struct e100_private *bdp)
1831 {
1832 bdp->last_tcb = NULL;
1833 e100_free_rfd_pool(bdp);
1834 e100_free_tcb_pool(bdp);
1835 }
1836
1837 /*****************************************************************************/
1838 /*****************************************************************************/
1839 /* Run Time Functions */
1840 /*****************************************************************************/
1841
1842 /**
1843 * e100_watchdog
1844 * @dev: adapter's net_device struct
1845 *
1846  * This routine runs every 2 seconds, updates our statistics and link
1847  * state, and refreshes the txthld value.
1848 */
1849 void
1850 e100_watchdog(struct net_device *dev)
1851 {
1852 struct e100_private *bdp = dev->priv;
1853
1854 #ifdef E100_CU_DEBUG
1855 if (e100_cu_unknown_state(bdp)) {
1856 printk(KERN_ERR "e100: %s: CU unknown state in e100_watchdog\n",
1857 dev->name);
1858 }
1859 #endif
1860 if (!netif_running(dev)) {
1861 return;
1862 }
1863
1864 /* check if link state has changed */
1865 if (e100_phy_check(bdp)) {
1866 if (netif_carrier_ok(dev)) {
1867 printk(KERN_ERR
1868 "e100: %s NIC Link is Up %d Mbps %s duplex\n",
1869 bdp->device->name, bdp->cur_line_speed,
1870 (bdp->cur_dplx_mode == HALF_DUPLEX) ?
1871 "Half" : "Full");
1872
1873 e100_config_fc(bdp);
1874 e100_config(bdp);
1875
1876 } else {
1877 printk(KERN_ERR "e100: %s NIC Link is Down\n",
1878 bdp->device->name);
1879 }
1880 }
1881
1882 // toggle the tx queue according to link status
1883 // this also resolves a race condition between tx & non-cu cmd flows
1884 if (netif_carrier_ok(dev)) {
1885 if (netif_running(dev))
1886 netif_wake_queue(dev);
1887 } else {
1888 if (netif_running(dev))
1889 netif_stop_queue(dev);
1890 		/* When changing to non-autoneg, the device may lose  */
1891 		/* link with some switches. e100 will try to          */
1892 		/* recover the link by sending a command to the PHY layer */
1893 if (bdp->params.e100_speed_duplex != E100_AUTONEG)
1894 e100_force_speed_duplex_to_phy(bdp);
1895 }
1896
1897 rmb();
1898
1899 if (e100_update_stats(bdp)) {
1900
1901 /* Check if a change in the IFS parameter is needed,
1902 and configure the device accordingly */
1903 if (bdp->params.b_params & PRM_IFS)
1904 e100_manage_adaptive_ifs(bdp);
1905
1906 /* Now adjust our dynamic tx threshold value */
1907 e100_refresh_txthld(bdp);
1908
1909 		/* Now if we are on a 557 and we haven't received any frames then we
1910 * should issue a multicast command to reset the RU */
1911 if (bdp->rev_id < D101A4_REV_ID) {
1912 if (!(bdp->stats_counters->basic_stats.rcv_gd_frames)) {
1913 e100_set_multi(dev);
1914 }
1915 }
1916 }
1917 /* Issue command to dump statistics from device. */
1918 /* Check for command completion on next watchdog timer. */
1919 e100_dump_stats_cntrs(bdp);
1920
1921 wmb();
1922
1923 /* relaunch watchdog timer in 2 sec */
1924 mod_timer(&(bdp->watchdog_timer), jiffies + (2 * HZ));
1925
1926 if (list_empty(&bdp->active_rx_list))
1927 e100_trigger_SWI(bdp);
1928 }
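/* Illustrative sketch (assumption, not from this function): the watchdog is
 * a plain kernel timer, so e100_open() is expected to arm it roughly like
 * this, passing the net_device as the timer argument:
 *
 *	init_timer(&bdp->watchdog_timer);
 *	bdp->watchdog_timer.function = (void (*)(unsigned long)) e100_watchdog;
 *	bdp->watchdog_timer.data = (unsigned long) dev;
 *	mod_timer(&bdp->watchdog_timer, jiffies + 2 * HZ);
 */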
1929
1930 /**
1931 * e100_manage_adaptive_ifs
1932  * @bdp: adapter's private data struct
1933 *
1934 * This routine manages the adaptive Inter-Frame Spacing algorithm
1935 * using a state machine.
1936 */
1937 void
1938 e100_manage_adaptive_ifs(struct e100_private *bdp)
1939 {
1940 static u16 state_table[9][4] = { // rows are states
1941 {2, 0, 0, 0}, // state0 // column0: next state if increasing
1942 {2, 0, 5, 30}, // state1 // column1: next state if decreasing
1943 {5, 1, 5, 30}, // state2 // column2: IFS value for 100 mbit
1944 {5, 3, 0, 0}, // state3 // column3: IFS value for 10 mbit
1945 {5, 3, 10, 60}, // state4
1946 {8, 4, 10, 60}, // state5
1947 {8, 6, 0, 0}, // state6
1948 {8, 6, 20, 60}, // state7
1949 {8, 7, 20, 60} // state8
1950 };
1951
1952 u32 transmits =
1953 le32_to_cpu(bdp->stats_counters->basic_stats.xmt_gd_frames);
1954 u32 collisions =
1955 le32_to_cpu(bdp->stats_counters->basic_stats.xmt_ttl_coll);
1956 u32 state = bdp->ifs_state;
1957 u32 old_value = bdp->ifs_value;
1958 int next_col;
1959 u32 min_transmits;
1960
1961 if (bdp->cur_dplx_mode == FULL_DUPLEX) {
1962 bdp->ifs_state = 0;
1963 bdp->ifs_value = 0;
1964
1965 } else { /* Half Duplex */
1966 /* Set speed specific parameters */
1967 if (bdp->cur_line_speed == 100) {
1968 next_col = 2;
1969 min_transmits = MIN_NUMBER_OF_TRANSMITS_100;
1970
1971 } else { /* 10 Mbps */
1972 next_col = 3;
1973 min_transmits = MIN_NUMBER_OF_TRANSMITS_10;
1974 }
1975
1976 if ((transmits / 32 < collisions)
1977 && (transmits > min_transmits)) {
1978 state = state_table[state][0]; /* increment */
1979
1980 } else if (transmits < min_transmits) {
1981 state = state_table[state][1]; /* decrement */
1982 }
1983
1984 bdp->ifs_value = state_table[state][next_col];
1985 bdp->ifs_state = state;
1986 }
1987
1988 /* If the IFS value has changed, configure the device */
1989 if (bdp->ifs_value != old_value) {
1990 e100_config_ifs(bdp);
1991 e100_config(bdp);
1992 }
1993 }
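/* Worked example of the state machine above (comment only): at 100 Mbps
 * half duplex, next_col is 2. Starting from state 1 under persistent
 * congestion (transmits / 32 < collisions, transmits above the minimum),
 * successive watchdog ticks walk column 0: 1 -> 2 -> 5 -> 8, and the IFS
 * value grows 5 -> 10 -> 20. When traffic falls below the minimum, column 1
 * walks it back: 8 -> 7 -> 6, where state 6's 100 Mbps IFS value is 0.
 * Every change of ifs_value triggers e100_config_ifs()/e100_config().
 */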
1994
1995 /**
1996 * e100intr - interrupt handler
1997 * @irq: the IRQ number
1998 * @dev_inst: the net_device struct
1999 * @regs: registers (unused)
2000 *
2001 * This routine is the ISR for the e100 board. It services
2002 * the RX & TX queues & starts the RU if it has stopped due
2003 * to no resources.
2004 */
2005 irqreturn_t
2006 e100intr(int irq, void *dev_inst, struct pt_regs *regs)
2007 {
2008 struct net_device *dev;
2009 struct e100_private *bdp;
2010 u16 intr_status;
2011
2012 dev = dev_inst;
2013 bdp = dev->priv;
2014
2015 intr_status = readw(&bdp->scb->scb_status);
2016 /* If not my interrupt, just return */
2017 if (!(intr_status & SCB_STATUS_ACK_MASK) || (intr_status == 0xffff)) {
2018 return IRQ_NONE;
2019 }
2020
2021 /* disable and ack intr */
2022 e100_disable_clear_intr(bdp);
2023
2024 /* the device is closed, don't continue or else bad things may happen. */
2025 if (!netif_running(dev)) {
2026 e100_set_intr_mask(bdp);
2027 return IRQ_NONE;
2028 }
2029
2030 	/* SWI intr (triggered by watchdog) is a signal to allocate new skb buffers */
2031 if (intr_status & SCB_STATUS_ACK_SWI) {
2032 e100_alloc_skbs(bdp);
2033 }
2034
2035 /* do recv work if any */
2036 if (intr_status &
2037 (SCB_STATUS_ACK_FR | SCB_STATUS_ACK_RNR | SCB_STATUS_ACK_SWI))
2038 bdp->drv_stats.rx_intr_pkts += e100_rx_srv(bdp);
2039
2040 /* clean up after tx'ed packets */
2041 if (intr_status & (SCB_STATUS_ACK_CNA | SCB_STATUS_ACK_CX))
2042 e100_tx_srv(bdp);
2043
2044 e100_set_intr_mask(bdp);
2045 return IRQ_HANDLED;
2046 }
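/* Registration sketch (assumption, not from this function): as a shared-IRQ
 * handler, the ISR above is normally installed from e100_open() with the
 * net_device as the cookie that comes back here as dev_inst:
 *
 *	if (request_irq(dev->irq, &e100intr, SA_SHIRQ, dev->name, dev))
 *		return -EAGAIN;
 */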
2047
2048 /**
2049 * e100_tx_srv - service TX queues
2050  * @bdp: adapter's private data struct
2051 *
2052  * This routine services the TX queues. It reclaims the TCBs & TBDs & other
2053 * resources used during the transmit of this buffer. It is called from the ISR.
2054 * We don't need a tx_lock since we always access buffers which were already
2055 * prepared.
2056 */
2057 void
2058 e100_tx_srv(struct e100_private *bdp)
2059 {
2060 tcb_t *tcb;
2061 int i;
2062
2063 /* go over at most TxDescriptors buffers */
2064 for (i = 0; i < bdp->params.TxDescriptors; i++) {
2065 tcb = bdp->tcb_pool.data;
2066 tcb += bdp->tcb_pool.head;
2067
2068 rmb();
2069
2070 /* if the buffer at 'head' is not complete, break */
2071 if (!(tcb->tcb_hdr.cb_status &
2072 __constant_cpu_to_le16(CB_STATUS_COMPLETE)))
2073 break;
2074
2075 /* service next buffer, clear the out of resource condition */
2076 e100_tx_skb_free(bdp, tcb);
2077
2078 if (netif_running(bdp->device))
2079 netif_wake_queue(bdp->device);
2080
2081 /* if we've caught up with 'tail', break */
2082 if (NEXT_TCB_TOUSE(bdp->tcb_pool.head) == bdp->tcb_pool.tail) {
2083 break;
2084 }
2085
2086 bdp->tcb_pool.head = NEXT_TCB_TOUSE(bdp->tcb_pool.head);
2087 }
2088 }
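/* The head/tail bookkeeping above relies on NEXT_TCB_TOUSE (defined in
 * e100.h) wrapping the index around the circular TCB list. A plausible
 * definition, shown here only as an illustration:
 *
 *	#define NEXT_TCB_TOUSE(X) \
 *		((((X) + 1) >= bdp->params.TxDescriptors) ? 0 : (X) + 1)
 */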
2089
2090 /**
2091 * e100_rx_srv - service RX queue
2092  * @bdp: adapter's private data struct
2095 *
2096 * This routine processes the RX interrupt & services the RX queues.
2097 * For each successful RFD, it allocates a new msg block, links that
2098 * into the RFD list, and sends the old msg upstream.
2099 * The new RFD is then put at the end of the free list of RFD's.
2100 * It returns the number of serviced RFDs.
2101 */
2102 u32
2103 e100_rx_srv(struct e100_private *bdp)
2104 {
2105 rfd_t *rfd; /* new rfd, received rfd */
2106 int i;
2107 u16 rfd_status;
2108 struct sk_buff *skb;
2109 struct net_device *dev;
2110 unsigned int data_sz;
2111 struct rx_list_elem *rx_struct;
2112 u32 rfd_cnt = 0;
2113
2114 dev = bdp->device;
2115
2116 	/* The current rx design is as follows:
2117 	 * 1. a socket buffer (skb) is used to pass the network packet upstream
2118 	 * 2. all HW host memory structures (like RFDs, RBDs and data buffers)
2119 	 *    are placed in the skb's data room
2120 	 * 3. when rx processing is complete, we adjust the skb's internal
2121 	 *    pointers to exclude everything unrelated (RFD, RBD) from the data
2122 	 *    area and leave just the received packet itself
2123 	 * 4. for each skb passed to the upper layer, a new one is allocated instead.
2124 	 * 5. if no skbs are left, another attempt to allocate skbs is made in 2 sec
2125 	 *    (the watchdog triggers an SWI interrupt and the ISR allocates new skbs)
2126 	 */
2127 for (i = 0; i < bdp->params.RxDescriptors; i++) {
2128 if (list_empty(&(bdp->active_rx_list))) {
2129 break;
2130 }
2131
2132 rx_struct = list_entry(bdp->active_rx_list.next,
2133 struct rx_list_elem, list_elem);
2134 skb = rx_struct->skb;
2135
2136 rfd = RFD_POINTER(skb, bdp); /* locate RFD within skb */
2137
2138 // sync only the RFD header
2139 pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
2140 bdp->rfd_size, PCI_DMA_FROMDEVICE);
2141 rfd_status = le16_to_cpu(rfd->rfd_header.cb_status); /* get RFD's status */
2142 		if (!(rfd_status & RFD_STATUS_COMPLETE))	/* does not contain data yet - exit */
2143 break;
2144
2145 /* to allow manipulation with current skb we need to unlink it */
2146 list_del(&(rx_struct->list_elem));
2147
2148 		/* do not free & unmap a badly received packet;
2149 		 * move it to the end of the skb list for reuse */
2150 if (!(rfd_status & RFD_STATUS_OK)) {
2151 e100_add_skb_to_end(bdp, rx_struct);
2152 continue;
2153 }
2154
2155 data_sz = min_t(u16, (le16_to_cpu(rfd->rfd_act_cnt) & 0x3fff),
2156 (sizeof (rfd_t) - bdp->rfd_size));
2157
2158 /* now sync all the data */
2159 pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
2160 (data_sz + bdp->rfd_size),
2161 PCI_DMA_FROMDEVICE);
2162
2163 pci_unmap_single(bdp->pdev, rx_struct->dma_addr,
2164 sizeof (rfd_t), PCI_DMA_FROMDEVICE);
2165
2166 list_add(&(rx_struct->list_elem), &(bdp->rx_struct_pool));
2167
2168 /* end of dma access to rfd */
2169 bdp->skb_req++; /* incr number of requested skbs */
2170 e100_alloc_skbs(bdp); /* and get them */
2171
2172 		/* set packet size, excluding the checksum (last 2 bytes) if it is present */
2173 if ((bdp->flags & DF_CSUM_OFFLOAD)
2174 && (bdp->rev_id < D102_REV_ID))
2175 skb_put(skb, (int) data_sz - 2);
2176 else
2177 skb_put(skb, (int) data_sz);
2178
2179 bdp->drv_stats.net_stats.rx_bytes += skb->len;
2180
2181 /* set the protocol */
2182 skb->protocol = eth_type_trans(skb, dev);
2183
2184 /* set the checksum info */
2185 if (bdp->flags & DF_CSUM_OFFLOAD) {
2186 if (bdp->rev_id >= D102_REV_ID) {
2187 skb->ip_summed = e100_D102_check_checksum(rfd);
2188 } else {
2189 skb->ip_summed = e100_D101M_checksum(bdp, skb);
2190 }
2191 } else {
2192 skb->ip_summed = CHECKSUM_NONE;
2193 }
2194
2195 if(bdp->vlgrp && (rfd_status & CB_STATUS_VLAN)) {
2196 vlan_hwaccel_rx(skb, bdp->vlgrp, be16_to_cpu(rfd->vlanid));
2197 } else {
2198 netif_rx(skb);
2199 }
2200 dev->last_rx = jiffies;
2201
2202 rfd_cnt++;
2203 } /* end of rfd loop */
2204
2205 /* restart the RU if it has stopped */
2206 if ((readw(&bdp->scb->scb_status) & SCB_RUS_MASK) != SCB_RUS_READY) {
2207 e100_start_ru(bdp);
2208 }
2209
2210 return rfd_cnt;
2211 }
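/* Buffer layout sketch (illustrative): each rx skb's data room begins with
 * the hardware RFD, followed by the packet data, so RFD_POINTER() simply
 * offsets into skb->data and skb_put() above exposes only the packet:
 *
 *	skb->data: [ RFD (rfd_size bytes) | received packet (data_sz bytes) ]
 *	             ^-- synced/unmapped     ^-- what goes upstream
 */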
2212
2213 void
2214 e100_refresh_txthld(struct e100_private *bdp)
2215 {
2216 basic_cntr_t *pstat = &(bdp->stats_counters->basic_stats);
2217
2218 	/* As long as tx_per_underrun is not 0, we can go about dynamically
2219 	 * adjusting the xmit threshold. We stop doing that & resort to defaults
2220 	 * once the adjustments become meaningless. The value is adjusted by
2221 	 * dumping the error counters & checking the # of xmit underrun errors
2222 	 * we've had. */
2223 if (bdp->tx_per_underrun) {
2224 		/* We are going to use the last values dumped from the dump
2225 		 * statistics command */
2226 if (le32_to_cpu(pstat->xmt_gd_frames)) {
2227 if (le32_to_cpu(pstat->xmt_uruns)) {
2228 /*
2229 * if we have had more than one underrun per "DEFAULT #
2230 * OF XMITS ALLOWED PER UNDERRUN" good xmits, raise the
2231 * THRESHOLD.
2232 */
2233 if ((le32_to_cpu(pstat->xmt_gd_frames) /
2234 le32_to_cpu(pstat->xmt_uruns)) <
2235 bdp->tx_per_underrun) {
2236 bdp->tx_thld += 3;
2237 }
2238 }
2239
2240 /*
2241 			 * if we've had less than one underrun per the DEFAULT number
2242 			 * of good xmits allowed, lower the THOLD, but never below 6
2243 */
2244 if (le32_to_cpu(pstat->xmt_gd_frames) >
2245 bdp->tx_per_underrun) {
2246 bdp->tx_thld--;
2247
2248 if (bdp->tx_thld < 6)
2249 bdp->tx_thld = 6;
2250
2251 }
2252 }
2253
2254 /* end good xmits */
2255 /*
2256 		 * if our adjustments are becoming unreasonable, stop adjusting &
2257 		 * resort to defaults & pray. A THOLD value > 190 means that the
2258 		 * adapter will wait for 190*8=1520 bytes in the TX FIFO before it
2259 		 * starts xmit. Since the MTU is 1514, it doesn't make any sense to
2260 		 * increase it further. */
2261 if (bdp->tx_thld >= 190) {
2262 bdp->tx_per_underrun = 0;
2263 bdp->tx_thld = 189;
2264 }
2265 } /* end underrun check */
2266 }
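/* Worked example of the heuristic above (numbers hypothetical): with
 * tx_per_underrun == 2000, a dump showing xmt_gd_frames == 4000 and
 * xmt_uruns == 3 gives 4000 / 3 = 1333 < 2000, so tx_thld is raised by 3;
 * since 4000 > 2000 the threshold is also decremented once, for a net
 * change of +2 per watchdog tick. With no underruns only the decrement
 * applies, and the floor of 6 / cap of 189 keep the value sane.
 */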
2267
2268 /* Changed for 82558 enhancement */
2269 /**
2270 * e100_start_cu - start the adapter's CU
2271  * @bdp: adapter's private data struct
2272 * @tcb: TCB to be transmitted
2273 *
2274 * This routine issues a CU Start or CU Resume command to the 82558/9.
2275 * This routine was added because the prepare_ext_xmit_buff takes advantage
2276 * of the 82558/9's Dynamic TBD chaining feature and has to start the CU as
2277 * soon as the first TBD is ready.
2278 *
2279 * e100_start_cu must be called while holding the tx_lock !
2280 */
2281 u8
2282 e100_start_cu(struct e100_private *bdp, tcb_t *tcb)
2283 {
2284 unsigned long lock_flag;
2285 u8 ret = true;
2286
2287 spin_lock_irqsave(&(bdp->bd_lock), lock_flag);
2288 switch (bdp->next_cu_cmd) {
2289 case RESUME_NO_WAIT:
2290 		/* the last CU command was a CU_RESUME; if this is a 558 or newer we
2291 		 * don't need to wait for the command word to clear; we reach here only if we are a bachelor
2292 */
2293 e100_exec_cmd(bdp, SCB_CUC_RESUME);
2294 break;
2295
2296 case RESUME_WAIT:
2297 if ((bdp->flags & IS_ICH) &&
2298 (bdp->cur_line_speed == 10) &&
2299 (bdp->cur_dplx_mode == HALF_DUPLEX)) {
2300 e100_wait_exec_simple(bdp, SCB_CUC_NOOP);
2301 udelay(1);
2302 }
2303 if ((e100_wait_exec_simple(bdp, SCB_CUC_RESUME)) &&
2304 (bdp->flags & IS_BACHELOR) && (!(bdp->flags & IS_ICH))) {
2305 bdp->next_cu_cmd = RESUME_NO_WAIT;
2306 }
2307 break;
2308
2309 case START_WAIT:
2310 // The last command was a non_tx CU command
2311 if (!e100_wait_cus_idle(bdp))
2312 printk(KERN_DEBUG
2313 "e100: %s: cu_start: timeout waiting for cu\n",
2314 bdp->device->name);
2315 if (!e100_wait_exec_cmplx(bdp, (u32) (tcb->tcb_phys),
2316 SCB_CUC_START, CB_TRANSMIT)) {
2317 printk(KERN_DEBUG
2318 "e100: %s: cu_start: timeout waiting for scb\n",
2319 bdp->device->name);
2320 e100_exec_cmplx(bdp, (u32) (tcb->tcb_phys),
2321 SCB_CUC_START);
2322 ret = false;
2323 }
2324
2325 bdp->next_cu_cmd = RESUME_WAIT;
2326
2327 break;
2328 }
2329
2330 /* save the last tcb */
2331 bdp->last_tcb = tcb;
2332
2333 spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag);
2334 return ret;
2335 }
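/* Usage sketch (assumption about the caller, which lives elsewhere in this
 * file): the transmit path prepares a TCB and then kicks the CU while
 * holding tx_lock, as required by the comment above:
 *
 *	spin_lock_bh(&bdp->tx_lock);
 *	// ...link the tcb into the pool and fill its TBDs...
 *	e100_start_cu(bdp, tcb);
 *	spin_unlock_bh(&bdp->tx_lock);
 */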
2336
2337 /* ====================================================================== */
2338 /* hw */
2339 /* ====================================================================== */
2340
2341 /**
2342 * e100_selftest - perform H/W self test
2343  * @bdp: adapter's private data struct
2344 * @st_timeout: address to return timeout value, if fails
2345 * @st_result: address to return selftest result, if fails
2346 *
2347 * This routine will issue PORT Self-test command to test the e100.
2348 * The self-test will fail if the adapter's master-enable bit is not
2349 * set in the PCI Command Register, or if the adapter is not seated
2350  * in a PCI master-enabled slot. We also disable interrupts when the
2351  * command completes.
2352 *
2353 * Returns:
2354 * true: if adapter passes self_test
2355 * false: otherwise
2356 */
2357 unsigned char
2358 e100_selftest(struct e100_private *bdp, u32 *st_timeout, u32 *st_result)
2359 {
2360 u32 selftest_cmd;
2361
2362 /* initialize the nic state before running test */
2363 e100_sw_reset(bdp, PORT_SOFTWARE_RESET);
2364 /* Setup the address of the self_test area */
2365 selftest_cmd = bdp->selftest_phys;
2366
2367 /* Setup SELF TEST Command Code in D3 - D0 */
2368 selftest_cmd |= PORT_SELFTEST;
2369
2370 /* Initialize the self-test signature and results DWORDS */
2371 bdp->selftest->st_sign = 0;
2372 bdp->selftest->st_result = 0xffffffff;
2373
2374 /* Do the port command */
2375 writel(selftest_cmd, &bdp->scb->scb_port);
2376 readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
2377
2378 /* Wait at least 10 milliseconds for the self-test to complete */
2379 set_current_state(TASK_UNINTERRUPTIBLE);
2380 schedule_timeout(HZ / 100 + 1);
2381
2382 /* disable interrupts since they are enabled */
2383 /* after device reset during selftest */
2384 e100_disable_clear_intr(bdp);
2385
2386 	/* If the first self-test DWORD is still zero, we've timed out. If the
2387 	 * second DWORD is not zero, we have an error. */
2388 if ((bdp->selftest->st_sign == 0) || (bdp->selftest->st_result != 0)) {
2389
2390 if (st_timeout)
2391 *st_timeout = !(le32_to_cpu(bdp->selftest->st_sign));
2392
2393 if (st_result)
2394 *st_result = le32_to_cpu(bdp->selftest->st_result);
2395
2396 return false;
2397 }
2398
2399 return true;
2400 }
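/* Caller sketch (illustrative): diagnostics can tell a timeout from a real
 * failure using the two out-parameters:
 *
 *	u32 st_timeout, st_result;
 *	if (!e100_selftest(bdp, &st_timeout, &st_result)) {
 *		if (st_timeout)
 *			printk(KERN_ERR "e100: selftest timed out\n");
 *		else
 *			printk(KERN_ERR "e100: selftest failed, result %x\n",
 *			       st_result);
 *	}
 */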
2401
2402 /**
2403  * e100_setup_iaaddr - issue IA setup command
2404  * @bdp: adapter's private data struct
2405 * @eaddr: new ethernet address
2406 *
2407 * This routine will issue the IA setup command. This command
2408 * will notify the 82557 (e100) of what its individual (node)
2409 * address is. This command will be executed in polled mode.
2410 *
2411 * Returns:
2412 * true: if the IA setup command was successfully issued and completed
2413 * false: otherwise
2414 */
2415 unsigned char
2416 e100_setup_iaaddr(struct e100_private *bdp, u8 *eaddr)
2417 {
2418 unsigned int i;
2419 cb_header_t *ntcb_hdr;
2420 unsigned char res;
2421 nxmit_cb_entry_t *cmd;
2422
2423 if ((cmd = e100_alloc_non_tx_cmd(bdp)) == NULL) {
2424 res = false;
2425 goto exit;
2426 }
2427
2428 ntcb_hdr = (cb_header_t *) cmd->non_tx_cmd;
2429 ntcb_hdr->cb_cmd = __constant_cpu_to_le16(CB_IA_ADDRESS);
2430
2431 for (i = 0; i < ETH_ALEN; i++) {
2432 (cmd->non_tx_cmd)->ntcb.setup.ia_addr[i] = eaddr[i];
2433 }
2434
2435 res = e100_exec_non_cu_cmd(bdp, cmd);
2436 if (!res)
2437 printk(KERN_WARNING "e100: %s: IA setup failed\n",
2438 bdp->device->name);
2439
2440 exit:
2441 return res;
2442 }
2443
2444 /**
2445 * e100_start_ru - start the RU if needed
2446  * @bdp: adapter's private data struct
2447 *
2448  * This routine checks the status of the 82557's receive unit (RU),
2449 * and starts the RU if it was not already active. However,
2450 * before restarting the RU, the driver gives the RU the buffers
2451 * it freed up during the servicing of the ISR. If there are
2452 * no free buffers to give to the RU, (i.e. we have reached a
2453 * no resource condition) the RU will not be started till the
2454 * next ISR.
2455 */
2456 void
2457 e100_start_ru(struct e100_private *bdp)
2458 {
2459 struct rx_list_elem *rx_struct = NULL;
2460 int buffer_found = 0;
2461 struct list_head *entry_ptr;
2462
2463 list_for_each(entry_ptr, &(bdp->active_rx_list)) {
2464 rx_struct =
2465 list_entry(entry_ptr, struct rx_list_elem, list_elem);
2466 pci_dma_sync_single(bdp->pdev, rx_struct->dma_addr,
2467 bdp->rfd_size, PCI_DMA_FROMDEVICE);
2468 if (!((SKB_RFD_STATUS(rx_struct->skb, bdp) &
2469 __constant_cpu_to_le16(RFD_STATUS_COMPLETE)))) {
2470 buffer_found = 1;
2471 break;
2472 }
2473 }
2474
2475 /* No available buffers */
2476 if (!buffer_found) {
2477 return;
2478 }
2479
2480 spin_lock(&bdp->bd_lock);
2481
2482 if (!e100_wait_exec_cmplx(bdp, rx_struct->dma_addr, SCB_RUC_START, 0)) {
2483 printk(KERN_DEBUG
2484 "e100: %s: start_ru: wait_scb failed\n",
2485 bdp->device->name);
2486 e100_exec_cmplx(bdp, rx_struct->dma_addr, SCB_RUC_START);
2487 }
2488 if (bdp->next_cu_cmd == RESUME_NO_WAIT) {
2489 bdp->next_cu_cmd = RESUME_WAIT;
2490 }
2491 spin_unlock(&bdp->bd_lock);
2492 }
2493
2494 /**
2495 * e100_cmd_complete_location
2496  * @bdp: adapter's private data struct
2497 *
2498 * This routine returns a pointer to the location of the command-complete
2499 * DWord in the dump statistical counters area, according to the statistical
2500 * counters mode (557 - basic, 558 - extended, or 559 - TCO mode).
2501 * See e100_config_init() for the setting of the statistical counters mode.
2502 */
2503 static u32 *
2504 e100_cmd_complete_location(struct e100_private *bdp)
2505 {
2506 u32 *cmd_complete;
2507 max_counters_t *stats = bdp->stats_counters;
2508
2509 switch (bdp->stat_mode) {
2510 case E100_EXTENDED_STATS:
2511 cmd_complete =
2512 (u32 *) &(((err_cntr_558_t *) (stats))->cmd_complete);
2513 break;
2514
2515 case E100_TCO_STATS:
2516 cmd_complete =
2517 (u32 *) &(((err_cntr_559_t *) (stats))->cmd_complete);
2518 break;
2519
2520 case E100_BASIC_STATS:
2521 default:
2522 cmd_complete =
2523 (u32 *) &(((err_cntr_557_t *) (stats))->cmd_complete);
2524 break;
2525 }
2526
2527 return cmd_complete;
2528 }
2529
2530 /**
2531 * e100_clr_cntrs - clear statistics counters
2532  * @bdp: adapter's private data struct
2533 *
2534 * This routine will clear the adapter error statistic counters.
2535 *
2536 * Returns:
2537 * true: if successfully cleared stat counters
2538 * false: otherwise
2539 */
2540 static unsigned char
2541 e100_clr_cntrs(struct e100_private *bdp)
2542 {
2543 volatile u32 *pcmd_complete;
2544
2545 /* clear the dump counter complete word */
2546 pcmd_complete = e100_cmd_complete_location(bdp);
2547 *pcmd_complete = 0;
2548 wmb();
2549
2550 if (!e100_wait_exec_cmplx(bdp, bdp->stat_cnt_phys, SCB_CUC_DUMP_ADDR, 0))
2551 return false;
2552
2553 /* wait 10 microseconds for the command to complete */
2554 udelay(10);
2555
2556 if (!e100_wait_exec_simple(bdp, SCB_CUC_DUMP_RST_STAT))
2557 return false;
2558
2559 if (bdp->next_cu_cmd == RESUME_NO_WAIT) {
2560 bdp->next_cu_cmd = RESUME_WAIT;
2561 }
2562
2563 return true;
2564 }
2565
2566 static unsigned char
2567 e100_update_stats(struct e100_private *bdp)
2568 {
2569 u32 *pcmd_complete;
2570 basic_cntr_t *pstat = &(bdp->stats_counters->basic_stats);
2571
2572 // check if last dump command completed
2573 pcmd_complete = e100_cmd_complete_location(bdp);
2574 if (*pcmd_complete != le32_to_cpu(DUMP_RST_STAT_COMPLETED) &&
2575 *pcmd_complete != le32_to_cpu(DUMP_STAT_COMPLETED)) {
2576 *pcmd_complete = 0;
2577 return false;
2578 }
2579
2580 /* increment the statistics */
2581 bdp->drv_stats.net_stats.rx_packets +=
2582 le32_to_cpu(pstat->rcv_gd_frames);
2583 bdp->drv_stats.net_stats.tx_packets +=
2584 le32_to_cpu(pstat->xmt_gd_frames);
2585 bdp->drv_stats.net_stats.rx_dropped += le32_to_cpu(pstat->rcv_rsrc_err);
2586 bdp->drv_stats.net_stats.collisions += le32_to_cpu(pstat->xmt_ttl_coll);
2587 bdp->drv_stats.net_stats.rx_length_errors +=
2588 le32_to_cpu(pstat->rcv_shrt_frames);
2589 bdp->drv_stats.net_stats.rx_over_errors +=
2590 le32_to_cpu(pstat->rcv_rsrc_err);
2591 bdp->drv_stats.net_stats.rx_crc_errors +=
2592 le32_to_cpu(pstat->rcv_crc_errs);
2593 bdp->drv_stats.net_stats.rx_frame_errors +=
2594 le32_to_cpu(pstat->rcv_algn_errs);
2595 bdp->drv_stats.net_stats.rx_fifo_errors +=
2596 le32_to_cpu(pstat->rcv_oruns);
2597 bdp->drv_stats.net_stats.tx_aborted_errors +=
2598 le32_to_cpu(pstat->xmt_max_coll);
2599 bdp->drv_stats.net_stats.tx_carrier_errors +=
2600 le32_to_cpu(pstat->xmt_lost_crs);
2601 bdp->drv_stats.net_stats.tx_fifo_errors +=
2602 le32_to_cpu(pstat->xmt_uruns);
2603
2604 bdp->drv_stats.tx_late_col += le32_to_cpu(pstat->xmt_late_coll);
2605 bdp->drv_stats.tx_ok_defrd += le32_to_cpu(pstat->xmt_deferred);
2606 bdp->drv_stats.tx_one_retry += le32_to_cpu(pstat->xmt_sngl_coll);
2607 bdp->drv_stats.tx_mt_one_retry += le32_to_cpu(pstat->xmt_mlt_coll);
2608 bdp->drv_stats.rcv_cdt_frames += le32_to_cpu(pstat->rcv_err_coll);
2609
2610 if (bdp->stat_mode != E100_BASIC_STATS) {
2611 ext_cntr_t *pex_stat = &bdp->stats_counters->extended_stats;
2612
2613 bdp->drv_stats.xmt_fc_pkts +=
2614 le32_to_cpu(pex_stat->xmt_fc_frames);
2615 bdp->drv_stats.rcv_fc_pkts +=
2616 le32_to_cpu(pex_stat->rcv_fc_frames);
2617 bdp->drv_stats.rcv_fc_unsupported +=
2618 le32_to_cpu(pex_stat->rcv_fc_unsupported);
2619 }
2620
2621 if (bdp->stat_mode == E100_TCO_STATS) {
2622 tco_cntr_t *ptco_stat = &bdp->stats_counters->tco_stats;
2623
2624 bdp->drv_stats.xmt_tco_pkts +=
2625 le16_to_cpu(ptco_stat->xmt_tco_frames);
2626 bdp->drv_stats.rcv_tco_pkts +=
2627 le16_to_cpu(ptco_stat->rcv_tco_frames);
2628 }
2629
2630 *pcmd_complete = 0;
2631 return true;
2632 }
2633
2634 /**
2635 * e100_dump_stat_cntrs
2636  * @bdp: adapter's private data struct
2637 *
2638  * This routine will dump the board's statistical counters without waiting
2639  * for the stat dump to complete. Any access to these stats should verify
2640  * that the command has completed.
2641 */
2642 void
2643 e100_dump_stats_cntrs(struct e100_private *bdp)
2644 {
2645 unsigned long lock_flag_bd;
2646
2647 spin_lock_irqsave(&(bdp->bd_lock), lock_flag_bd);
2648
2649 /* dump h/w stats counters */
2650 if (e100_wait_exec_simple(bdp, SCB_CUC_DUMP_RST_STAT)) {
2651 if (bdp->next_cu_cmd == RESUME_NO_WAIT) {
2652 bdp->next_cu_cmd = RESUME_WAIT;
2653 }
2654 }
2655
2656 spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag_bd);
2657 }
2658
2659 /**
2660 * e100_exec_non_cu_cmd
2661  * @bdp: adapter's private data struct
2662 * @command: the non-cu command to execute
2663 *
2664  * This routine will submit a command block to be executed.
2665 */
2666 unsigned char
2667 e100_exec_non_cu_cmd(struct e100_private *bdp, nxmit_cb_entry_t *command)
2668 {
2669 cb_header_t *ntcb_hdr;
2670 unsigned long lock_flag;
2671 unsigned long expiration_time;
2672 unsigned char rc = true;
2673 u8 sub_cmd;
2674
2675 ntcb_hdr = (cb_header_t *) command->non_tx_cmd; /* get hdr of non tcb cmd */
2676 	sub_cmd = le16_to_cpu(ntcb_hdr->cb_cmd);	/* cb_cmd is stored little-endian */
2677
2678 /* Set the Command Block to be the last command block */
2679 ntcb_hdr->cb_cmd |= __constant_cpu_to_le16(CB_EL_BIT);
2680 ntcb_hdr->cb_status = 0;
2681 ntcb_hdr->cb_lnk_ptr = 0;
2682
2683 wmb();
2684 if (in_interrupt())
2685 return e100_delayed_exec_non_cu_cmd(bdp, command);
2686
2687 if (netif_running(bdp->device) && netif_carrier_ok(bdp->device))
2688 return e100_delayed_exec_non_cu_cmd(bdp, command);
2689
2690 spin_lock_bh(&(bdp->bd_non_tx_lock));
2691
2692 if (bdp->non_tx_command_state != E100_NON_TX_IDLE) {
2693 goto delayed_exec;
2694 }
2695
2696 if (bdp->last_tcb) {
2697 rmb();
2698 if ((bdp->last_tcb->tcb_hdr.cb_status &
2699 __constant_cpu_to_le16(CB_STATUS_COMPLETE)) == 0)
2700 goto delayed_exec;
2701 }
2702
2703 if ((readw(&bdp->scb->scb_status) & SCB_CUS_MASK) == SCB_CUS_ACTIVE) {
2704 goto delayed_exec;
2705 }
2706
2707 spin_lock_irqsave(&bdp->bd_lock, lock_flag);
2708
2709 if (!e100_wait_exec_cmplx(bdp, command->dma_addr, SCB_CUC_START, sub_cmd)) {
2710 spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag);
2711 rc = false;
2712 goto exit;
2713 }
2714
2715 bdp->next_cu_cmd = START_WAIT;
2716 spin_unlock_irqrestore(&(bdp->bd_lock), lock_flag);
2717
2718 /* now wait for completion of non-cu CB up to 20 msec */
2719 expiration_time = jiffies + HZ / 50 + 1;
2720 rmb();
2721 while (!(ntcb_hdr->cb_status &
2722 __constant_cpu_to_le16(CB_STATUS_COMPLETE))) {
2723
2724 if (time_before(jiffies, expiration_time)) {
2725 spin_unlock_bh(&(bdp->bd_non_tx_lock));
2726 yield();
2727 spin_lock_bh(&(bdp->bd_non_tx_lock));
2728 } else {
2729 #ifdef E100_CU_DEBUG
2730 printk(KERN_ERR "e100: %s: non-TX command (%x) "
2731 "timeout\n", bdp->device->name, sub_cmd);
2732 #endif
2733 rc = false;
2734 goto exit;
2735 }
2736 rmb();
2737 }
2738
2739 exit:
2740 e100_free_non_tx_cmd(bdp, command);
2741
2742 if (netif_running(bdp->device))
2743 netif_wake_queue(bdp->device);
2744
2745 spin_unlock_bh(&(bdp->bd_non_tx_lock));
2746 return rc;
2747
2748 delayed_exec:
2749 spin_unlock_bh(&(bdp->bd_non_tx_lock));
2750 return e100_delayed_exec_non_cu_cmd(bdp, command);
2751 }
2752
2753 /**
2754 * e100_sw_reset
2755  * @bdp: adapter's private data struct
2756 * @reset_cmd: s/w reset or selective reset
2757 *
2758 * This routine will issue a software reset to the adapter. It
2759  * will also disable interrupts, as they are enabled after reset.
2760 */
2761 void
2762 e100_sw_reset(struct e100_private *bdp, u32 reset_cmd)
2763 {
2764 /* Do a selective reset first to avoid a potential PCI hang */
2765 writel(PORT_SELECTIVE_RESET, &bdp->scb->scb_port);
2766 readw(&(bdp->scb->scb_status)); /* flushes last write, read-safe */
2767
2768 /* wait for the reset to take effect */
2769 udelay(20);
2770 if (reset_cmd == PORT_SOFTWARE_RESET) {
2771 writel(PORT_SOFTWARE_RESET, &bdp->scb->scb_port);
2772
2773 /* wait 20 micro seconds for the reset to take effect */
2774 udelay(20);
2775 }
2776
2777 /* Mask off our interrupt line -- it is unmasked after reset */
2778 e100_disable_clear_intr(bdp);
2779 #ifdef E100_CU_DEBUG
2780 bdp->last_cmd = 0;
2781 bdp->last_sub_cmd = 0;
2782 #endif
2783 }
2784
2785 /**
2786  * e100_load_microcode - download microcode to the controller
2787  * @bdp: adapter's private data struct
2788  *
2789  * This routine downloads microcode onto the controller. This
2790  * microcode is available for the 82558/9 and 82550. Currently the
2791  * microcode handles interrupt bundling and the TCO workaround.
2792 *
2793 * Returns:
2794  * true: if successful
2795 * false: otherwise
2796 */
2797 static unsigned char
2798 e100_load_microcode(struct e100_private *bdp)
2799 {
2800 static struct {
2801 u8 rev_id;
2802 u32 ucode[UCODE_MAX_DWORDS + 1];
2803 int timer_dword;
2804 int bundle_dword;
2805 int min_size_dword;
2806 } ucode_opts[] = {
2807 { D101A4_REV_ID,
2808 D101_A_RCVBUNDLE_UCODE,
2809 D101_CPUSAVER_TIMER_DWORD,
2810 D101_CPUSAVER_BUNDLE_DWORD,
2811 D101_CPUSAVER_MIN_SIZE_DWORD },
2812 { D101B0_REV_ID,
2813 D101_B0_RCVBUNDLE_UCODE,
2814 D101_CPUSAVER_TIMER_DWORD,
2815 D101_CPUSAVER_BUNDLE_DWORD,
2816 D101_CPUSAVER_MIN_SIZE_DWORD },
2817 { D101MA_REV_ID,
2818 D101M_B_RCVBUNDLE_UCODE,
2819 D101M_CPUSAVER_TIMER_DWORD,
2820 D101M_CPUSAVER_BUNDLE_DWORD,
2821 D101M_CPUSAVER_MIN_SIZE_DWORD },
2822 { D101S_REV_ID,
2823 D101S_RCVBUNDLE_UCODE,
2824 D101S_CPUSAVER_TIMER_DWORD,
2825 D101S_CPUSAVER_BUNDLE_DWORD,
2826 D101S_CPUSAVER_MIN_SIZE_DWORD },
2827 { D102_REV_ID,
2828 D102_B_RCVBUNDLE_UCODE,
2829 D102_B_CPUSAVER_TIMER_DWORD,
2830 D102_B_CPUSAVER_BUNDLE_DWORD,
2831 D102_B_CPUSAVER_MIN_SIZE_DWORD },
2832 { D102C_REV_ID,
2833 D102_C_RCVBUNDLE_UCODE,
2834 D102_C_CPUSAVER_TIMER_DWORD,
2835 D102_C_CPUSAVER_BUNDLE_DWORD,
2836 D102_C_CPUSAVER_MIN_SIZE_DWORD },
2837 { D102E_REV_ID,
2838 D102_E_RCVBUNDLE_UCODE,
2839 D102_E_CPUSAVER_TIMER_DWORD,
2840 D102_E_CPUSAVER_BUNDLE_DWORD,
2841 D102_E_CPUSAVER_MIN_SIZE_DWORD },
2842 { D102E_A1_REV_ID,
2843 D102_E_RCVBUNDLE_UCODE,
2844 D102_E_CPUSAVER_TIMER_DWORD,
2845 D102_E_CPUSAVER_BUNDLE_DWORD,
2846 D102_E_CPUSAVER_MIN_SIZE_DWORD },
2847 { 0, {0}, 0, 0, 0}
2848 }, *opts;
2849
2850 opts = ucode_opts;
2851
2852 /* User turned ucode loading off */
2853 if (!(bdp->params.b_params & PRM_UCODE))
2854 return false;
2855
2856 /* These controllers do not need ucode */
2857 if (bdp->flags & IS_ICH)
2858 return false;
2859
2860 /* Search for ucode match against h/w rev_id */
2861 while (opts->rev_id) {
2862 if (bdp->rev_id == opts->rev_id) {
2863 int i;
2864 u32 *ucode_dword;
2865 load_ucode_cb_t *ucode_cmd_ptr;
2866 nxmit_cb_entry_t *cmd = e100_alloc_non_tx_cmd(bdp);
2867
2868 if (cmd != NULL) {
2869 ucode_cmd_ptr =
2870 (load_ucode_cb_t *) cmd->non_tx_cmd;
2871 ucode_dword = ucode_cmd_ptr->ucode_dword;
2872 } else {
2873 return false;
2874 }
2875
2876 memcpy(ucode_dword, opts->ucode, sizeof (opts->ucode));
2877
2878 /* Insert user-tunable settings */
2879 ucode_dword[opts->timer_dword] &= 0xFFFF0000;
2880 ucode_dword[opts->timer_dword] |=
2881 (u16) bdp->params.IntDelay;
2882 ucode_dword[opts->bundle_dword] &= 0xFFFF0000;
2883 ucode_dword[opts->bundle_dword] |=
2884 (u16) bdp->params.BundleMax;
2885 ucode_dword[opts->min_size_dword] &= 0xFFFF0000;
2886 ucode_dword[opts->min_size_dword] |=
2887 (bdp->params.b_params & PRM_BUNDLE_SMALL) ?
2888 0xFFFF : 0xFF80;
2889
2890 for (i = 0; i < UCODE_MAX_DWORDS; i++)
2891 cpu_to_le32s(&(ucode_dword[i]));
2892
2893 ucode_cmd_ptr->load_ucode_cbhdr.cb_cmd =
2894 __constant_cpu_to_le16(CB_LOAD_MICROCODE);
2895
2896 return e100_exec_non_cu_cmd(bdp, cmd);
2897 }
2898 opts++;
2899 }
2900
2901 return false;
2902 }
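/* Worked example of the dword patching above (values hypothetical): if the
 * matched image has opts->timer_dword == 1 and the user set IntDelay to
 * 1536 (0x600), the insertion amounts to
 *
 *	ucode_dword[1] = (ucode_dword[1] & 0xFFFF0000) | 0x0600;
 *
 * i.e. only the low 16 bits of the selected instruction word carry the
 * tunable; BundleMax and the min-size word are patched the same way.
 */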
2903
2904 /***************************************************************************/
2905 /***************************************************************************/
2906 /* EEPROM Functions */
2907 /***************************************************************************/
2908
2909 /* Read PWA (printed wired assembly) number */
2910 void __devinit
2911 e100_rd_pwa_no(struct e100_private *bdp)
2912 {
2913 bdp->pwa_no = e100_eeprom_read(bdp, EEPROM_PWA_NO);
2914 bdp->pwa_no <<= 16;
2915 bdp->pwa_no |= e100_eeprom_read(bdp, EEPROM_PWA_NO + 1);
2916 }
2917
2918 /* Read the permanent ethernet address from the EEPROM. */
2919 void __devinit
2920 e100_rd_eaddr(struct e100_private *bdp)
2921 {
2922 int i;
2923 u16 eeprom_word;
2924
2925 for (i = 0; i < 6; i += 2) {
2926 eeprom_word =
2927 e100_eeprom_read(bdp,
2928 EEPROM_NODE_ADDRESS_BYTE_0 + (i / 2));
2929
2930 bdp->device->dev_addr[i] =
2931 bdp->perm_node_address[i] = (u8) eeprom_word;
2932 bdp->device->dev_addr[i + 1] =
2933 bdp->perm_node_address[i + 1] = (u8) (eeprom_word >> 8);
2934 }
2935 }
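/* Byte-order note with a worked example: each EEPROM word holds two address
 * bytes, low byte first, so a word read back as 0xB2A1 yields
 * dev_addr[i] = 0xA1 and dev_addr[i + 1] = 0xB2. A MAC starting with
 * 00:02:B3 is therefore stored as word 0x0200 followed by a word whose low
 * byte is 0xB3.
 */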
2936
2937 /* Check the D102 RFD flags to see if the checksum passed */
2938 static unsigned char
2939 e100_D102_check_checksum(rfd_t *rfd)
2940 {
2941 if (((le16_to_cpu(rfd->rfd_header.cb_status)) & RFD_PARSE_BIT)
2942 && (((rfd->rcvparserstatus & CHECKSUM_PROTOCOL_MASK) ==
2943 RFD_TCP_PACKET)
2944 || ((rfd->rcvparserstatus & CHECKSUM_PROTOCOL_MASK) ==
2945 RFD_UDP_PACKET))
2946 && (rfd->checksumstatus & TCPUDP_CHECKSUM_BIT_VALID)
2947 && (rfd->checksumstatus & TCPUDP_CHECKSUM_VALID)) {
2948 return CHECKSUM_UNNECESSARY;
2949 }
2950 return CHECKSUM_NONE;
2951 }
2952
2953 /**
2954 * e100_D101M_checksum
2955  * @bdp: adapter's private data struct
2956 * @skb: skb received
2957 *
2958 * Sets the skb->csum value from D101 csum found at the end of the Rx frame. The
2959 * D101M sums all words in frame excluding the ethernet II header (14 bytes) so
2960  * in case the packet is Ethernet II and the protocol is IP, all that is
2961  * needed is to assign this value to skb->csum.
2962 */
2963 static unsigned char
2964 e100_D101M_checksum(struct e100_private *bdp, struct sk_buff *skb)
2965 {
2966 unsigned short proto = (skb->protocol);
2967
2968 if (proto == __constant_htons(ETH_P_IP)) {
2969
2970 skb->csum = get_unaligned((u16 *) (skb->tail));
2971 return CHECKSUM_HW;
2972 }
2973 return CHECKSUM_NONE;
2974 }
2975
2976 /***************************************************************************/
2977 /***************************************************************************/
2978 /***************************************************************************/
2979 /***************************************************************************/
2980 /* Auxiliary Functions */
2981 /***************************************************************************/
2982
2983 /* Print the board's configuration */
2984 void __devinit
2985 e100_print_brd_conf(struct e100_private *bdp)
2986 {
2987 /* Print the string if checksum Offloading was enabled */
2988 if (bdp->flags & DF_CSUM_OFFLOAD)
2989 printk(KERN_NOTICE " Hardware receive checksums enabled\n");
2990 else {
2991 if (bdp->rev_id >= D101MA_REV_ID)
2992 printk(KERN_NOTICE " Hardware receive checksums disabled\n");
2993 }
2994
2995 if ((bdp->flags & DF_UCODE_LOADED))
2996 printk(KERN_NOTICE " cpu cycle saver enabled\n");
2997 }
2998
2999 /**
3000 * e100_pci_setup - setup the adapter's PCI information
3001 * @pcid: adapter's pci_dev struct
3002  * @bdp: adapter's private data struct
3003 *
3004 * This routine sets up all PCI information for the adapter. It enables the bus
3005  * master bit (some BIOSes don't do this), requests memory and I/O regions, and
3006 * calls ioremap() on the adapter's memory region.
3007 *
3008 * Returns:
3009  * 0: if successful
3010  * negative errno value: otherwise
3011 */
3012 static unsigned char __devinit
3013 e100_pci_setup(struct pci_dev *pcid, struct e100_private *bdp)
3014 {
3015 struct net_device *dev = bdp->device;
3016 int rc = 0;
3017
3018 if ((rc = pci_enable_device(pcid)) != 0) {
3019 goto err;
3020 }
3021
3022 /* dev and ven ID have already been checked so it is our device */
3023 pci_read_config_byte(pcid, PCI_REVISION_ID, (u8 *) &(bdp->rev_id));
3024
3025 /* address #0 is a memory region */
3026 dev->mem_start = pci_resource_start(pcid, 0);
3027 dev->mem_end = dev->mem_start + sizeof (scb_t);
3028
3029 	/* address #1 is an I/O region */
3030 dev->base_addr = pci_resource_start(pcid, 1);
3031
3032 if ((rc = pci_request_regions(pcid, e100_short_driver_name)) != 0) {
3033 goto err_disable;
3034 }
3035
3036 pci_enable_wake(pcid, 0, 0);
3037
3038 /* if Bus Mastering is off, turn it on! */
3039 pci_set_master(pcid);
3040
3041 /* address #0 is a memory mapping */
3042 bdp->scb = (scb_t *) ioremap_nocache(dev->mem_start, sizeof (scb_t));
3043
3044 if (!bdp->scb) {
3045 printk(KERN_ERR "e100: %s: Failed to map PCI address 0x%lX\n",
3046 dev->name, pci_resource_start(pcid, 0));
3047 rc = -ENOMEM;
3048 goto err_region;
3049 }
3050
3051 return 0;
3052
3053 err_region:
3054 pci_release_regions(pcid);
3055 err_disable:
3056 pci_disable_device(pcid);
3057 err:
3058 return rc;
3059 }
3060
3061 void
3062 e100_isolate_driver(struct e100_private *bdp)
3063 {
3064
3065 /* Check if interface is up */
3066 /* NOTE: Can't use netif_running(bdp->device) because */
3067 /* dev_close clears __LINK_STATE_START before calling */
3068 /* e100_close (aka dev->stop) */
3069 if (bdp->device->flags & IFF_UP) {
3070 e100_disable_clear_intr(bdp);
3071 del_timer_sync(&bdp->watchdog_timer);
3072 netif_carrier_off(bdp->device);
3073 netif_stop_queue(bdp->device);
3074 bdp->last_tcb = NULL;
3075 }
3076 e100_sw_reset(bdp, PORT_SELECTIVE_RESET);
3077 }
3078
3079 static void
3080 e100_tcb_add_C_bit(struct e100_private *bdp)
3081 {
3082 tcb_t *tcb = (tcb_t *) bdp->tcb_pool.data;
3083 int i;
3084
3085 for (i = 0; i < bdp->params.TxDescriptors; i++, tcb++) {
3086 tcb->tcb_hdr.cb_status |= cpu_to_le16(CB_STATUS_COMPLETE);
3087 }
3088 }
3089
3090 /*
3091 * Procedure: e100_configure_device
3092 *
3093  * Description: This routine will configure the device
3094 *
3095 * Arguments:
3096 * bdp - Ptr to this card's e100_bdconfig structure
3097 *
3098 * Returns:
3099 * true upon success
3100 * false upon failure
3101 */
3102 unsigned char
3103 e100_configure_device(struct e100_private *bdp)
3104 {
3105 /*load CU & RU base */
3106 if (!e100_wait_exec_cmplx(bdp, 0, SCB_CUC_LOAD_BASE, 0))
3107 return false;
3108
3109 if (e100_load_microcode(bdp))
3110 bdp->flags |= DF_UCODE_LOADED;
3111
3112 if (!e100_wait_exec_cmplx(bdp, 0, SCB_RUC_LOAD_BASE, 0))
3113 return false;
3114
3115 /* Issue the load dump counters address command */
3116 if (!e100_wait_exec_cmplx(bdp, bdp->stat_cnt_phys, SCB_CUC_DUMP_ADDR, 0))
3117 return false;
3118
3119 if (!e100_setup_iaaddr(bdp, bdp->device->dev_addr)) {
3120 printk(KERN_ERR "e100: e100_configure_device: "
3121 "setup iaaddr failed\n");
3122 return false;
3123 }
3124
3125 e100_set_multi_exec(bdp->device);
3126
3127 /* Change for 82558 enhancement */
3128 /* If 82558/9 and if the user has enabled flow control, set up */
3129 /* flow Control Reg. in the CSR */
3130 if ((bdp->flags & IS_BACHELOR)
3131 && (bdp->params.b_params & PRM_FC)) {
3132 writeb(DFLT_FC_THLD,
3133 &bdp->scb->scb_ext.d101_scb.scb_fc_thld);
3134 writeb(DFLT_FC_CMD,
3135 &bdp->scb->scb_ext.d101_scb.scb_fc_xon_xoff);
3136 }
3137
3138 e100_force_config(bdp);
3139
3140 return true;
3141 }
3142
3143 void
3144 e100_deisolate_driver(struct e100_private *bdp, u8 full_reset)
3145 {
3146 u32 cmd = full_reset ? PORT_SOFTWARE_RESET : PORT_SELECTIVE_RESET;
3147 e100_sw_reset(bdp, cmd);
3148 if (cmd == PORT_SOFTWARE_RESET) {
3149 if (!e100_configure_device(bdp))
3150 printk(KERN_ERR "e100: e100_deisolate_driver:"
3151 " device configuration failed\n");
3152 }
3153
3154 if (netif_running(bdp->device)) {
3155
3156 bdp->next_cu_cmd = START_WAIT;
3157 bdp->last_tcb = NULL;
3158
3159 e100_start_ru(bdp);
3160
3161 /* relaunch watchdog timer in 2 sec */
3162 mod_timer(&(bdp->watchdog_timer), jiffies + (2 * HZ));
3163
3164 		// we must clear tcbs since we may have lost a Tx interrupt
3165 // or have unsent frames on the tcb chain
3166 e100_tcb_add_C_bit(bdp);
3167 e100_tx_srv(bdp);
3168 netif_wake_queue(bdp->device);
3169 e100_set_intr_mask(bdp);
3170 }
3171 }
3172
3173 static int
3174 e100_do_ethtool_ioctl(struct net_device *dev, struct ifreq *ifr)
3175 {
3176 struct ethtool_cmd ecmd;
3177 int rc = -EOPNOTSUPP;
3178
3179 if (copy_from_user(&ecmd, ifr->ifr_data, sizeof (ecmd.cmd)))
3180 return -EFAULT;
3181
3182 switch (ecmd.cmd) {
3183 case ETHTOOL_GSET:
3184 rc = e100_ethtool_get_settings(dev, ifr);
3185 break;
3186 case ETHTOOL_SSET:
3187 rc = e100_ethtool_set_settings(dev, ifr);
3188 break;
3189 case ETHTOOL_GDRVINFO:
3190 rc = e100_ethtool_get_drvinfo(dev, ifr);
3191 break;
3192 case ETHTOOL_GREGS:
3193 rc = e100_ethtool_gregs(dev, ifr);
3194 break;
3195 case ETHTOOL_NWAY_RST:
3196 rc = e100_ethtool_nway_rst(dev, ifr);
3197 break;
3198 case ETHTOOL_GLINK:
3199 rc = e100_ethtool_glink(dev, ifr);
3200 break;
3201 case ETHTOOL_GEEPROM:
3202 case ETHTOOL_SEEPROM:
3203 rc = e100_ethtool_eeprom(dev, ifr);
3204 break;
3205 case ETHTOOL_GSTATS: {
3206 struct {
3207 struct ethtool_stats cmd;
3208 uint64_t data[E100_STATS_LEN];
3209 } stats = { {ETHTOOL_GSTATS, E100_STATS_LEN} };
3210 struct e100_private *bdp = dev->priv;
3211 void *addr = ifr->ifr_data;
3212 int i;
3213
3214 for(i = 0; i < E100_NET_STATS_LEN; i++)
3215 stats.data[i] =
3216 ((unsigned long *)&bdp->drv_stats.net_stats)[i];
3217 stats.data[i++] = bdp->drv_stats.tx_late_col;
3218 stats.data[i++] = bdp->drv_stats.tx_ok_defrd;
3219 stats.data[i++] = bdp->drv_stats.tx_one_retry;
3220 stats.data[i++] = bdp->drv_stats.tx_mt_one_retry;
3221 stats.data[i++] = bdp->drv_stats.rcv_cdt_frames;
3222 stats.data[i++] = bdp->drv_stats.xmt_fc_pkts;
3223 stats.data[i++] = bdp->drv_stats.rcv_fc_pkts;
3224 stats.data[i++] = bdp->drv_stats.rcv_fc_unsupported;
3225 stats.data[i++] = bdp->drv_stats.xmt_tco_pkts;
3226 stats.data[i++] = bdp->drv_stats.rcv_tco_pkts;
3227 if(copy_to_user(addr, &stats, sizeof(stats)))
3228 return -EFAULT;
3229 return 0;
3230 }
3231 case ETHTOOL_GWOL:
3232 case ETHTOOL_SWOL:
3233 rc = e100_ethtool_wol(dev, ifr);
3234 break;
3235 case ETHTOOL_TEST:
3236 rc = e100_ethtool_test(dev, ifr);
3237 break;
3238 case ETHTOOL_GSTRINGS:
3239 rc = e100_ethtool_gstrings(dev,ifr);
3240 break;
3241 case ETHTOOL_PHYS_ID:
3242 rc = e100_ethtool_led_blink(dev,ifr);
3243 break;
3244 #ifdef ETHTOOL_GRINGPARAM
3245 case ETHTOOL_GRINGPARAM: {
3246 struct ethtool_ringparam ering;
3247 struct e100_private *bdp = dev->priv;
3248 memset((void *) &ering, 0, sizeof(ering));
3249 ering.rx_max_pending = E100_MAX_RFD;
3250 ering.tx_max_pending = E100_MAX_TCB;
3251 ering.rx_pending = bdp->params.RxDescriptors;
3252 ering.tx_pending = bdp->params.TxDescriptors;
3253 rc = copy_to_user(ifr->ifr_data, &ering, sizeof(ering))
3254 ? -EFAULT : 0;
3255 return rc;
3256 }
3257 #endif
3258 #ifdef ETHTOOL_SRINGPARAM
3259 case ETHTOOL_SRINGPARAM: {
3260 struct ethtool_ringparam ering;
3261 struct e100_private *bdp = dev->priv;
3262 if (copy_from_user(&ering, ifr->ifr_data, sizeof(ering)))
3263 return -EFAULT;
3264 if (ering.rx_pending > E100_MAX_RFD
3265 || ering.rx_pending < E100_MIN_RFD)
3266 return -EINVAL;
3267 if (ering.tx_pending > E100_MAX_TCB
3268 || ering.tx_pending < E100_MIN_TCB)
3269 return -EINVAL;
3270 if (netif_running(dev)) {
3271 spin_lock_bh(&dev->xmit_lock);
3272 e100_close(dev);
3273 spin_unlock_bh(&dev->xmit_lock);
3274 /* Use new values to open interface */
3275 bdp->params.RxDescriptors = ering.rx_pending;
3276 bdp->params.TxDescriptors = ering.tx_pending;
3277 e100_hw_init(bdp);
3278 e100_open(dev);
3279 }
3280 else {
3281 bdp->params.RxDescriptors = ering.rx_pending;
3282 bdp->params.TxDescriptors = ering.tx_pending;
3283 }
3284 return 0;
3285 }
3286 #endif
3287 #ifdef ETHTOOL_GPAUSEPARAM
3288 case ETHTOOL_GPAUSEPARAM: {
3289 struct ethtool_pauseparam epause;
3290 struct e100_private *bdp = dev->priv;
3291 memset((void *) &epause, 0, sizeof(epause));
3292 if ((bdp->flags & IS_BACHELOR)
3293 && (bdp->params.b_params & PRM_FC)) {
3294 epause.autoneg = 1;
3295 if (bdp->flags & DF_LINK_FC_CAP) {
3296 epause.rx_pause = 1;
3297 epause.tx_pause = 1;
3298 }
3299 if (bdp->flags & DF_LINK_FC_TX_ONLY)
3300 epause.tx_pause = 1;
3301 }
3302 rc = copy_to_user(ifr->ifr_data, &epause, sizeof(epause))
3303 ? -EFAULT : 0;
3304 return rc;
3305 }
3306 #endif
3307 #ifdef ETHTOOL_SPAUSEPARAM
3308 case ETHTOOL_SPAUSEPARAM: {
3309 struct ethtool_pauseparam epause;
3310 struct e100_private *bdp = dev->priv;
3311 if (!(bdp->flags & IS_BACHELOR))
3312 return -EINVAL;
3313 if (copy_from_user(&epause, ifr->ifr_data, sizeof(epause)))
3314 return -EFAULT;
3315 if (epause.autoneg == 1)
3316 bdp->params.b_params |= PRM_FC;
3317 else
3318 bdp->params.b_params &= ~PRM_FC;
3319 if (netif_running(dev)) {
3320 spin_lock_bh(&dev->xmit_lock);
3321 e100_close(dev);
3322 spin_unlock_bh(&dev->xmit_lock);
3323 e100_hw_init(bdp);
3324 e100_open(dev);
3325 }
3326 return 0;
3327 }
3328 #endif
3329 #ifdef ETHTOOL_GRXCSUM
3330 case ETHTOOL_GRXCSUM:
3331 case ETHTOOL_GTXCSUM:
3332 case ETHTOOL_GSG:
3333 { struct ethtool_value eval;
3334 struct e100_private *bdp = dev->priv;
3335 memset((void *) &eval, 0, sizeof(eval));
3336 if ((ecmd.cmd == ETHTOOL_GRXCSUM)
3337 && (bdp->params.b_params & PRM_XSUMRX))
3338 eval.data = 1;
3339 else
3340 eval.data = 0;
3341 rc = copy_to_user(ifr->ifr_data, &eval, sizeof(eval))
3342 ? -EFAULT : 0;
3343 return rc;
3344 }
3345 #endif
3346 #ifdef ETHTOOL_SRXCSUM
3347 case ETHTOOL_SRXCSUM:
3348 case ETHTOOL_STXCSUM:
3349 case ETHTOOL_SSG:
3350 { struct ethtool_value eval;
3351 struct e100_private *bdp = dev->priv;
3352 if (copy_from_user(&eval, ifr->ifr_data, sizeof(eval)))
3353 return -EFAULT;
3354 if (ecmd.cmd == ETHTOOL_SRXCSUM) {
3355 if (eval.data == 1) {
3356 if (bdp->rev_id >= D101MA_REV_ID)
3357 bdp->params.b_params |= PRM_XSUMRX;
3358 else
3359 return -EINVAL;
3360 } else {
3361 if (bdp->rev_id >= D101MA_REV_ID)
3362 bdp->params.b_params &= ~PRM_XSUMRX;
3363 else
3364 return 0;
3365 }
3366 } else {
3367 if (eval.data == 1)
3368 return -EINVAL;
3369 else
3370 return 0;
3371 }
3372 if (netif_running(dev)) {
3373 spin_lock_bh(&dev->xmit_lock);
3374 e100_close(dev);
3375 spin_unlock_bh(&dev->xmit_lock);
3376 e100_hw_init(bdp);
3377 e100_open(dev);
3378 }
3379 return 0;
3380 }
3381 #endif
3382 default:
3383 break;
3384 } //switch
3385 return rc;
3386 }
3387
3388 static int
3389 e100_ethtool_get_settings(struct net_device *dev, struct ifreq *ifr)
3390 {
3391 struct e100_private *bdp;
3392 struct ethtool_cmd ecmd;
3393 u16 advert = 0;
3394
3395 memset((void *) &ecmd, 0, sizeof (ecmd));
3396
3397 bdp = dev->priv;
3398
3399 ecmd.supported = bdp->speed_duplex_caps;
3400
3401 ecmd.port =
3402 (bdp->speed_duplex_caps & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
3403 ecmd.transceiver = XCVR_INTERNAL;
3404 ecmd.phy_address = bdp->phy_addr;
3405
3406 if (netif_carrier_ok(bdp->device)) {
3407 ecmd.speed = bdp->cur_line_speed;
3408 ecmd.duplex =
3409 (bdp->cur_dplx_mode == HALF_DUPLEX) ? DUPLEX_HALF : DUPLEX_FULL;
3410 }
3411 else {
3412 ecmd.speed = -1;
3413 ecmd.duplex = -1;
3414 }
3415
3416 ecmd.advertising = ADVERTISED_TP;
3417
3418 if (bdp->params.e100_speed_duplex == E100_AUTONEG) {
3419 ecmd.autoneg = AUTONEG_ENABLE;
3420 ecmd.advertising |= ADVERTISED_Autoneg;
3421 } else {
3422 ecmd.autoneg = AUTONEG_DISABLE;
3423 }
3424
3425 if (bdp->speed_duplex_caps & SUPPORTED_MII) {
3426 e100_mdi_read(bdp, MII_ADVERTISE, bdp->phy_addr, &advert);
3427
3428 if (advert & ADVERTISE_10HALF)
3429 ecmd.advertising |= ADVERTISED_10baseT_Half;
3430 if (advert & ADVERTISE_10FULL)
3431 ecmd.advertising |= ADVERTISED_10baseT_Full;
3432 if (advert & ADVERTISE_100HALF)
3433 ecmd.advertising |= ADVERTISED_100baseT_Half;
3434 if (advert & ADVERTISE_100FULL)
3435 ecmd.advertising |= ADVERTISED_100baseT_Full;
3436 } else {
3437 ecmd.autoneg = AUTONEG_DISABLE;
3438 ecmd.advertising &= ~ADVERTISED_Autoneg;
3439 }
3440
3441 if (copy_to_user(ifr->ifr_data, &ecmd, sizeof (ecmd)))
3442 return -EFAULT;
3443
3444 return 0;
3445 }
3446
3447 static int
3448 e100_ethtool_set_settings(struct net_device *dev, struct ifreq *ifr)
3449 {
3450 struct e100_private *bdp;
3451 int e100_new_speed_duplex;
3452 int ethtool_new_speed_duplex;
3453 struct ethtool_cmd ecmd;
3454
3455 bdp = dev->priv;
3456 if (copy_from_user(&ecmd, ifr->ifr_data, sizeof (ecmd))) {
3457 return -EFAULT;
3458 }
3459
3460 if ((ecmd.autoneg == AUTONEG_ENABLE)
3461 && (bdp->speed_duplex_caps & SUPPORTED_Autoneg)) {
3462 bdp->params.e100_speed_duplex = E100_AUTONEG;
3463 if (netif_running(dev)) {
3464 spin_lock_bh(&dev->xmit_lock);
3465 e100_close(dev);
3466 spin_unlock_bh(&dev->xmit_lock);
3467 e100_hw_init(bdp);
3468 e100_open(dev);
3469 }
3470 } else {
3471 if (ecmd.speed == SPEED_10) {
3472 if (ecmd.duplex == DUPLEX_HALF) {
3473 e100_new_speed_duplex =
3474 E100_SPEED_10_HALF;
3475 ethtool_new_speed_duplex =
3476 SUPPORTED_10baseT_Half;
3477 } else {
3478 e100_new_speed_duplex =
3479 E100_SPEED_10_FULL;
3480 ethtool_new_speed_duplex =
3481 SUPPORTED_10baseT_Full;
3482 }
3483 } else {
3484 if (ecmd.duplex == DUPLEX_HALF) {
3485 e100_new_speed_duplex =
3486 E100_SPEED_100_HALF;
3487 ethtool_new_speed_duplex =
3488 SUPPORTED_100baseT_Half;
3489 } else {
3490 e100_new_speed_duplex =
3491 E100_SPEED_100_FULL;
3492 ethtool_new_speed_duplex =
3493 SUPPORTED_100baseT_Full;
3494 }
3495 }
3496
3497 if (bdp->speed_duplex_caps & ethtool_new_speed_duplex) {
3498 bdp->params.e100_speed_duplex =
3499 e100_new_speed_duplex;
3500 if (netif_running(dev)) {
3501 spin_lock_bh(&dev->xmit_lock);
3502 e100_close(dev);
3503 spin_unlock_bh(&dev->xmit_lock);
3504 e100_hw_init(bdp);
3505 e100_open(dev);
3506 }
3507 } else {
3508 return -EOPNOTSUPP;
3509 }
3510 }
3511
3512 return 0;
3513 }
3514
3515 static int
3516 e100_ethtool_glink(struct net_device *dev, struct ifreq *ifr)
3517 {
3518 struct e100_private *bdp;
3519 struct ethtool_value info;
3520
3521 memset((void *) &info, 0, sizeof (info));
3522
3523 bdp = dev->priv;
3524 info.cmd = ETHTOOL_GLINK;
3525
3526 /* Consider both PHY link and netif_running */
3527 info.data = e100_update_link_state(bdp);
3528
3529 if (copy_to_user(ifr->ifr_data, &info, sizeof (info)))
3530 return -EFAULT;
3531
3532 return 0;
3533 }
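/* User-space sketch (illustrative, not kernel code): this handler answers
 * the generic SIOCETHTOOL ioctl, which a tool would issue roughly as:
 *
 *	struct ethtool_value ev = { .cmd = ETHTOOL_GLINK };
 *	struct ifreq ifr;
 *	strcpy(ifr.ifr_name, "eth0");
 *	ifr.ifr_data = (char *) &ev;
 *	ioctl(fd, SIOCETHTOOL, &ifr);
 *	printf("link %s\n", ev.data ? "up" : "down");
 */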
3534
3535 static int
3536 e100_ethtool_test(struct net_device *dev, struct ifreq *ifr)
3537 {
3538 struct ethtool_test *info;
3539 int rc = -EFAULT;
3540
3541 info = kmalloc(sizeof(*info) + max_test_res * sizeof(u64),
3542 GFP_ATOMIC);
3543
3544 if (!info)
3545 return -ENOMEM;
3546
3547 memset((void *) info, 0, sizeof(*info) +
3548 max_test_res * sizeof(u64));
3549
3550 if (copy_from_user(info, ifr->ifr_data, sizeof(*info)))
3551 goto exit;
3552
3553 info->flags = e100_run_diag(dev, info->data, info->flags);
3554
3555 if (!copy_to_user(ifr->ifr_data, info,
3556 sizeof(*info) + max_test_res * sizeof(u64)))
3557 rc = 0;
3558 exit:
3559 kfree(info);
3560 return rc;
3561 }
3562
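/**
 * e100_ethtool_gregs
 * @dev: pointer to adapter's net_device struct
 * @ifr: pointer to ioctl request structure
 *
 * ETHTOOL_GREGS handler. Dumps the SCB command/status registers and
 * the PHY's MII_NCONFIG register.
 */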
static int
e100_ethtool_gregs(struct net_device *dev, struct ifreq *ifr)
{
	struct e100_private *bdp;
	u32 regs_buff[E100_REGS_LEN];
	struct ethtool_regs regs = {ETHTOOL_GREGS};
	void *addr = ifr->ifr_data;
	u16 mdi_reg;

	bdp = dev->priv;

	if(copy_from_user(&regs, addr, sizeof(regs)))
		return -EFAULT;

	regs.version = (1 << 24) | bdp->rev_id;
	regs_buff[0] = readb(&(bdp->scb->scb_cmd_hi)) << 24 |
		readb(&(bdp->scb->scb_cmd_low)) << 16 |
		readw(&(bdp->scb->scb_status));
	e100_mdi_read(bdp, MII_NCONFIG, bdp->phy_addr, &mdi_reg);
	regs_buff[1] = mdi_reg;

	if(copy_to_user(addr, &regs, sizeof(regs)))
		return -EFAULT;

	addr += offsetof(struct ethtool_regs, data);
	if(copy_to_user(addr, regs_buff, regs.len))
		return -EFAULT;

	return 0;
}

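/**
 * e100_ethtool_nway_rst
 * @dev: pointer to adapter's net_device struct
 * @ifr: pointer to ioctl request structure
 *
 * ETHTOOL_NWAY_RST handler. Restarts auto-negotiation by bouncing the
 * interface, but only if the adapter supports autoneg and is currently
 * configured to use it.
 */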
static int
e100_ethtool_nway_rst(struct net_device *dev, struct ifreq *ifr)
{
	struct e100_private *bdp;

	bdp = dev->priv;

	if ((bdp->speed_duplex_caps & SUPPORTED_Autoneg) &&
	    (bdp->params.e100_speed_duplex == E100_AUTONEG)) {
		if (netif_running(dev)) {
			spin_lock_bh(&dev->xmit_lock);
			e100_close(dev);
			spin_unlock_bh(&dev->xmit_lock);
			e100_hw_init(bdp);
			e100_open(dev);
		}
	} else {
		return -EFAULT;
	}
	return 0;
}

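/**
 * e100_ethtool_get_drvinfo
 * @dev: pointer to adapter's net_device struct
 * @ifr: pointer to ioctl request structure
 *
 * ETHTOOL_GDRVINFO handler. Reports the driver name and version, the
 * adapter's PCI bus address and the sizes of the statistics, register
 * dump, EEPROM dump and self-test result blocks.
 */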
static int
e100_ethtool_get_drvinfo(struct net_device *dev, struct ifreq *ifr)
{
	struct e100_private *bdp;
	struct ethtool_drvinfo info;

	memset((void *) &info, 0, sizeof (info));

	bdp = dev->priv;

	strncpy(info.driver, e100_short_driver_name, sizeof (info.driver) - 1);
	strncpy(info.version, e100_driver_version, sizeof (info.version) - 1);
	strncpy(info.fw_version, "N/A",
		sizeof (info.fw_version) - 1);
	strncpy(info.bus_info, pci_name(bdp->pdev),
		sizeof (info.bus_info) - 1);
	info.n_stats = E100_STATS_LEN;
	info.regdump_len = E100_REGS_LEN * sizeof(u32);
	info.eedump_len = (bdp->eeprom_size << 1);
	info.testinfo_len = max_test_res;
	if (copy_to_user(ifr->ifr_data, &info, sizeof (info)))
		return -EFAULT;

	return 0;
}

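/**
 * e100_ethtool_eeprom
 * @dev: pointer to adapter's net_device struct
 * @ifr: pointer to ioctl request structure
 *
 * ETHTOOL_GEEPROM/ETHTOOL_SEEPROM handler. The EEPROM is accessed a
 * 16-bit word at a time, so byte-granular requests are widened to word
 * boundaries: a write that starts or ends mid-word first reads the
 * surrounding word and replaces only the targeted byte before the
 * whole block is written back.
 */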
static int
e100_ethtool_eeprom(struct net_device *dev, struct ifreq *ifr)
{
	struct e100_private *bdp;
	struct ethtool_eeprom ecmd;
	u16 eeprom_data[256];
	u16 *usr_eeprom_ptr;
	u16 first_word, last_word;
	int i, max_len;
	void *ptr;
	u8 *eeprom_data_bytes = (u8 *)eeprom_data;

	bdp = dev->priv;

	if (copy_from_user(&ecmd, ifr->ifr_data, sizeof (ecmd)))
		return -EFAULT;

	usr_eeprom_ptr =
		(u16 *) (ifr->ifr_data + offsetof(struct ethtool_eeprom, data));

	max_len = bdp->eeprom_size * 2;

	if (ecmd.offset > ecmd.offset + ecmd.len)
		return -EINVAL;

	if ((ecmd.offset + ecmd.len) > max_len)
		ecmd.len = (max_len - ecmd.offset);

	first_word = ecmd.offset >> 1;
	last_word = (ecmd.offset + ecmd.len - 1) >> 1;

	if (first_word >= bdp->eeprom_size)
		return -EFAULT;

	if (ecmd.cmd == ETHTOOL_GEEPROM) {
		for(i = 0; i <= (last_word - first_word); i++)
			eeprom_data[i] = e100_eeprom_read(bdp, first_word + i);

		ecmd.magic = E100_EEPROM_MAGIC;

		if (copy_to_user(ifr->ifr_data, &ecmd, sizeof (ecmd)))
			return -EFAULT;

		if(ecmd.offset & 1)
			eeprom_data_bytes++;
		if (copy_to_user(usr_eeprom_ptr, eeprom_data_bytes, ecmd.len))
			return -EFAULT;
	} else {
		if (ecmd.magic != E100_EEPROM_MAGIC)
			return -EFAULT;

		ptr = (void *)eeprom_data;
		if(ecmd.offset & 1) {
			/* need modification of first changed EEPROM word */
			/* only the second byte of the word is being modified */
			eeprom_data[0] = e100_eeprom_read(bdp, first_word);
			ptr++;
		}
		if((ecmd.offset + ecmd.len) & 1) {
			/* need modification of last changed EEPROM word */
			/* only the first byte of the word is being modified */
			eeprom_data[last_word - first_word] =
				e100_eeprom_read(bdp, last_word);
		}
		if(copy_from_user(ptr, usr_eeprom_ptr, ecmd.len))
			return -EFAULT;

		e100_eeprom_write_block(bdp, first_word, eeprom_data,
					last_word - first_word + 1);

		if (copy_to_user(ifr->ifr_data, &ecmd, sizeof (ecmd)))
			return -EFAULT;
	}
	return 0;
}

#define E100_BLINK_INTERVAL	(HZ/4)
/**
 * e100_led_control
 * @bdp: adapter's private data struct
 * @led_mdi_op: led operation
 *
 * Software control over the adapter's led. The possible operations are:
 * TURN LED OFF, TURN LED ON and RETURN LED CONTROL TO HARDWARE.
 */
static void
e100_led_control(struct e100_private *bdp, u16 led_mdi_op)
{
	e100_mdi_write(bdp, PHY_82555_LED_SWITCH_CONTROL,
		       bdp->phy_addr, led_mdi_op);
}
/**
 * e100_led_blink_callback
 * @data: pointer to adapter's private data struct
 *
 * Blink timer callback function. Toggles the led's ON/OFF status bit
 * and calls the led hardware access function.
 */
static void
e100_led_blink_callback(unsigned long data)
{
	struct e100_private *bdp = (struct e100_private *) data;

	if(bdp->flags & LED_IS_ON) {
		bdp->flags &= ~LED_IS_ON;
		e100_led_control(bdp, PHY_82555_LED_OFF);
	} else {
		bdp->flags |= LED_IS_ON;
		if (bdp->rev_id >= D101MA_REV_ID)
			e100_led_control(bdp, PHY_82555_LED_ON_559);
		else
			e100_led_control(bdp, PHY_82555_LED_ON_PRE_559);
	}

	mod_timer(&bdp->blink_timer, jiffies + E100_BLINK_INTERVAL);
}
/**
 * e100_ethtool_led_blink
 * @dev: pointer to adapter's net_device struct
 * @ifr: pointer to ioctl request structure
 *
 * Blink led ioctl handler. Initiates the blink timer and sleeps until
 * the blink period expires, then kills the timer and returns. Led
 * control is returned to hardware when the blink timer is killed.
 */
static int
e100_ethtool_led_blink(struct net_device *dev, struct ifreq *ifr)
{
	struct e100_private *bdp;
	struct ethtool_value ecmd;

	bdp = dev->priv;

	if (copy_from_user(&ecmd, ifr->ifr_data, sizeof (ecmd)))
		return -EFAULT;

	if(!bdp->blink_timer.function) {
		init_timer(&bdp->blink_timer);
		bdp->blink_timer.function = e100_led_blink_callback;
		bdp->blink_timer.data = (unsigned long) bdp;
	}

	mod_timer(&bdp->blink_timer, jiffies);

	set_current_state(TASK_INTERRUPTIBLE);

	if ((!ecmd.data) || (ecmd.data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)))
		ecmd.data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);

	schedule_timeout(ecmd.data * HZ);

	del_timer_sync(&bdp->blink_timer);

	e100_led_control(bdp, PHY_82555_LED_NORMAL_CONTROL);

	return 0;
}

static inline int __devinit
e100_10BaseT_adapter(struct e100_private *bdp)
{
	return ((bdp->pdev->device == 0x1229) &&
		(bdp->pdev->subsystem_vendor == 0x8086) &&
		(bdp->pdev->subsystem_device == 0x0003));
}

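/**
 * e100_get_speed_duplex_caps
 * @bdp: adapter's private data struct
 *
 * Builds the adapter's SUPPORTED_* capability mask from the PHY's BMSR
 * status register. NC3133 adapters are forced to 100 Mbps full-duplex
 * fibre, and an all-ones BMSR (0xFFFF) on the 10BaseT adapter variant
 * is treated as having no usable MII, leaving only 10 Mbps half-duplex
 * over twisted pair.
 */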
static void __devinit
e100_get_speed_duplex_caps(struct e100_private *bdp)
{
	u16 status;

	e100_mdi_read(bdp, MII_BMSR, bdp->phy_addr, &status);

	bdp->speed_duplex_caps = 0;

	bdp->speed_duplex_caps |=
		(status & BMSR_ANEGCAPABLE) ? SUPPORTED_Autoneg : 0;

	bdp->speed_duplex_caps |=
		(status & BMSR_10HALF) ? SUPPORTED_10baseT_Half : 0;

	bdp->speed_duplex_caps |=
		(status & BMSR_10FULL) ? SUPPORTED_10baseT_Full : 0;

	bdp->speed_duplex_caps |=
		(status & BMSR_100HALF) ? SUPPORTED_100baseT_Half : 0;

	bdp->speed_duplex_caps |=
		(status & BMSR_100FULL) ? SUPPORTED_100baseT_Full : 0;

	if (IS_NC3133(bdp))
		bdp->speed_duplex_caps =
			(SUPPORTED_FIBRE | SUPPORTED_100baseT_Full);
	else
		bdp->speed_duplex_caps |= SUPPORTED_TP;

	if ((status == 0xFFFF) && e100_10BaseT_adapter(bdp)) {
		bdp->speed_duplex_caps =
			(SUPPORTED_10baseT_Half | SUPPORTED_TP);
	} else {
		bdp->speed_duplex_caps |= SUPPORTED_MII;
	}
}

#ifdef CONFIG_PM
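/**
 * e100_setup_filter - configure the wake-up packet filter
 * @bdp: adapter's private data struct
 *
 * Issues a CB_LOAD_FILTER command so the adapter can recognize
 * IA-match and/or ARP wake-up packets, depending on the configured
 * wake-on-LAN options. Returns false if the command could not be
 * issued.
 */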
static unsigned char
e100_setup_filter(struct e100_private *bdp)
{
	cb_header_t *ntcb_hdr;
	unsigned char res = false;
	nxmit_cb_entry_t *cmd;

	if ((cmd = e100_alloc_non_tx_cmd(bdp)) == NULL) {
		goto exit;
	}

	ntcb_hdr = (cb_header_t *) cmd->non_tx_cmd;
	ntcb_hdr->cb_cmd = __constant_cpu_to_le16(CB_LOAD_FILTER);

	/* Set EL and FIX bit */
	(cmd->non_tx_cmd)->ntcb.filter.filter_data[0] =
		__constant_cpu_to_le32(CB_FILTER_EL | CB_FILTER_FIX);

	if (bdp->wolopts & WAKE_UCAST) {
		(cmd->non_tx_cmd)->ntcb.filter.filter_data[0] |=
			__constant_cpu_to_le32(CB_FILTER_IA_MATCH);
	}

	if (bdp->wolopts & WAKE_ARP) {
		/* Set the ARP bit and the lower IP address bytes;
		 * bdp->ip_lbytes holds the 2 low bytes of the IP address
		 * in network byte order */
		(cmd->non_tx_cmd)->ntcb.filter.filter_data[0] |=
			cpu_to_le32(CB_FILTER_ARP | bdp->ip_lbytes);
	}

	res = e100_exec_non_cu_cmd(bdp, cmd);
	if (!res)
		printk(KERN_WARNING "e100: %s: Filter setup failed\n",
		       bdp->device->name);

exit:
	return res;
}

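/**
 * e100_do_wol - arm wake-on-LAN before suspend
 * @pcid: adapter's pci_dev struct
 * @bdp: adapter's private data struct
 *
 * Configures the adapter's wake-on-LAN behavior and, for unicast or
 * ARP wake-up, loads the matching packet filter.
 */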
static void
e100_do_wol(struct pci_dev *pcid, struct e100_private *bdp)
{
	e100_config_wol(bdp);

	if (e100_config(bdp)) {
		if (bdp->wolopts & (WAKE_UCAST | WAKE_ARP))
			if (!e100_setup_filter(bdp))
				printk(KERN_ERR
				       "e100: WOL options failed\n");
	} else {
		printk(KERN_ERR "e100: config WOL failed\n");
	}
}
#endif

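/**
 * e100_get_ip_lbytes
 * @dev: pointer to adapter's net_device struct
 *
 * Returns the two low-order bytes, in network byte order, of the first
 * IPv4 address bound to the interface, for use in the ARP wake-up
 * filter; returns 0 if no address is configured.
 */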
static u16
e100_get_ip_lbytes(struct net_device *dev)
{
	struct in_ifaddr *ifa;
	struct in_device *in_dev;
	u32 res = 0;

	in_dev = (struct in_device *) dev->ip_ptr;
	/* Check if any in_device is bound to the interface */
	if (in_dev) {
		/* Check if any IP address is bound to the interface */
		if ((ifa = in_dev->ifa_list) != NULL) {
			res = __constant_ntohl(ifa->ifa_address);
			res = __constant_htons(res & 0x0000ffff);
		}
	}
	return res;
}

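/**
 * e100_ethtool_wol
 * @dev: pointer to adapter's net_device struct
 * @ifr: pointer to ioctl request structure
 *
 * ETHTOOL_GWOL/ETHTOOL_SWOL handler. Get reports the supported and
 * currently enabled wake-up options; set accepts a request only if
 * every requested option is supported, or if the request disables
 * wake-on-LAN entirely.
 */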
static int
e100_ethtool_wol(struct net_device *dev, struct ifreq *ifr)
{
	struct e100_private *bdp;
	struct ethtool_wolinfo wolinfo;
	int res = 0;

	bdp = dev->priv;

	if (copy_from_user(&wolinfo, ifr->ifr_data, sizeof (wolinfo))) {
		return -EFAULT;
	}

	switch (wolinfo.cmd) {
	case ETHTOOL_GWOL:
		wolinfo.supported = bdp->wolsupported;
		wolinfo.wolopts = bdp->wolopts;
		if (copy_to_user(ifr->ifr_data, &wolinfo, sizeof (wolinfo)))
			res = -EFAULT;
		break;
	case ETHTOOL_SWOL:
		/* Accept if all requested options are supported,
		 * or if the request disables WOL entirely */
		if (((wolinfo.wolopts & bdp->wolsupported) == wolinfo.wolopts)
		    || (wolinfo.wolopts == 0)) {
			bdp->wolopts = wolinfo.wolopts;
		} else {
			res = -EOPNOTSUPP;
		}
		if (wolinfo.wolopts & WAKE_ARP)
			bdp->ip_lbytes = e100_get_ip_lbytes(dev);
		break;
	default:
		break;
	}
	return res;
}

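/**
 * e100_ethtool_gstrings
 * @dev: pointer to adapter's net_device struct
 * @ifr: pointer to ioctl request structure
 *
 * ETHTOOL_GSTRINGS handler. Returns the name table for either the
 * self-test results (ETH_SS_TEST) or the statistics (ETH_SS_STATS).
 */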
static int e100_ethtool_gstrings(struct net_device *dev, struct ifreq *ifr)
{
	struct ethtool_gstrings info;
	char *strings = NULL;
	char *usr_strings;
	int i;

	memset((void *) &info, 0, sizeof(info));

	usr_strings = (u8 *) (ifr->ifr_data +
			      offsetof(struct ethtool_gstrings, data));

	if (copy_from_user(&info, ifr->ifr_data, sizeof (info)))
		return -EFAULT;

	switch (info.string_set) {
	case ETH_SS_TEST: {
		int ret = 0;

		if (info.len > max_test_res)
			info.len = max_test_res;
		strings = kmalloc(info.len * ETH_GSTRING_LEN, GFP_ATOMIC);
		if (!strings)
			return -ENOMEM;
		memset(strings, 0, info.len * ETH_GSTRING_LEN);

		for (i = 0; i < info.len; i++) {
			sprintf(strings + i * ETH_GSTRING_LEN, "%s",
				test_strings[i]);
		}
		if (copy_to_user(ifr->ifr_data, &info, sizeof (info)))
			ret = -EFAULT;
		if (copy_to_user(usr_strings, strings, info.len * ETH_GSTRING_LEN))
			ret = -EFAULT;
		kfree(strings);
		return ret;
	}
	case ETH_SS_STATS: {
		char *strings = NULL;
		void *addr = ifr->ifr_data;

		info.len = E100_STATS_LEN;
		strings = *e100_gstrings_stats;
		if(copy_to_user(ifr->ifr_data, &info, sizeof(info)))
			return -EFAULT;
		addr += offsetof(struct ethtool_gstrings, data);
		if(copy_to_user(addr, strings,
				info.len * ETH_GSTRING_LEN))
			return -EFAULT;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

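/**
 * e100_mii_ioctl
 * @dev: pointer to adapter's net_device struct
 * @ifr: pointer to ioctl request structure
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 *
 * MII ioctl handler. Register reads go straight to the PHY; writes are
 * restricted to BMCR (register 0) values that map onto one of the
 * driver's speed/duplex settings, and take effect by bouncing the
 * interface.
 */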
static int
e100_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct e100_private *bdp;
	struct mii_ioctl_data *data_ptr =
		(struct mii_ioctl_data *) &(ifr->ifr_data);

	bdp = dev->priv;

	switch (cmd) {
	case SIOCGMIIPHY:
		data_ptr->phy_id = bdp->phy_addr & 0x1f;
		break;

	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		e100_mdi_read(bdp, data_ptr->reg_num & 0x1f, bdp->phy_addr,
			      &(data_ptr->val_out));
		break;

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* Only BMCR (reg 0) writes that change speed/duplex
		 * or restart/reset autoneg are accepted */
		if (data_ptr->reg_num == 0 &&
		    (data_ptr->val_in == (BMCR_ANENABLE | BMCR_ANRESTART) /* restart cmd */
		     || data_ptr->val_in == (BMCR_RESET) /* reset cmd */
		     || data_ptr->val_in & (BMCR_SPEED100 | BMCR_FULLDPLX)
		     || data_ptr->val_in == 0)) {
			if (data_ptr->val_in == (BMCR_ANENABLE | BMCR_ANRESTART)
			    || data_ptr->val_in == (BMCR_RESET))
				bdp->params.e100_speed_duplex = E100_AUTONEG;
			else if (data_ptr->val_in == (BMCR_SPEED100 | BMCR_FULLDPLX))
				bdp->params.e100_speed_duplex = E100_SPEED_100_FULL;
			else if (data_ptr->val_in == (BMCR_SPEED100))
				bdp->params.e100_speed_duplex = E100_SPEED_100_HALF;
			else if (data_ptr->val_in == (BMCR_FULLDPLX))
				bdp->params.e100_speed_duplex = E100_SPEED_10_FULL;
			else
				bdp->params.e100_speed_duplex = E100_SPEED_10_HALF;
			if (netif_running(dev)) {
				spin_lock_bh(&dev->xmit_lock);
				e100_close(dev);
				spin_unlock_bh(&dev->xmit_lock);
				e100_hw_init(bdp);
				e100_open(dev);
			}
		} else {
			/* Only speed/duplex changes are allowed */
			return -EINVAL;
		}
		break;

	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

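/**
 * e100_alloc_non_tx_cmd - allocate a non-transmit command block
 * @bdp: adapter's private data struct
 *
 * Allocates the list element and its DMA-consistent command buffer.
 * Returns NULL if either allocation fails.
 */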
nxmit_cb_entry_t *
e100_alloc_non_tx_cmd(struct e100_private *bdp)
{
	nxmit_cb_entry_t *non_tx_cmd_elem;

	if (!(non_tx_cmd_elem = (nxmit_cb_entry_t *)
	      kmalloc(sizeof (nxmit_cb_entry_t), GFP_ATOMIC))) {
		return NULL;
	}
	non_tx_cmd_elem->non_tx_cmd =
		pci_alloc_consistent(bdp->pdev, sizeof (nxmit_cb_t),
				     &(non_tx_cmd_elem->dma_addr));
	if (non_tx_cmd_elem->non_tx_cmd == NULL) {
		kfree(non_tx_cmd_elem);
		return NULL;
	}
	return non_tx_cmd_elem;
}

void
e100_free_non_tx_cmd(struct e100_private *bdp,
		     nxmit_cb_entry_t *non_tx_cmd_elem)
{
	pci_free_consistent(bdp->pdev, sizeof (nxmit_cb_t),
			    non_tx_cmd_elem->non_tx_cmd,
			    non_tx_cmd_elem->dma_addr);
	kfree(non_tx_cmd_elem);
}

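/**
 * e100_free_nontx_list - drain the non-transmit command queue
 * @bdp: adapter's private data struct
 *
 * Frees every queued command block and clears the per-command-type
 * coalescing slots in same_cmd_entry[].
 */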
static void
e100_free_nontx_list(struct e100_private *bdp)
{
	nxmit_cb_entry_t *command;
	int i;

	while (!list_empty(&bdp->non_tx_cmd_list)) {
		command = list_entry(bdp->non_tx_cmd_list.next,
				     nxmit_cb_entry_t, list_elem);
		list_del(&(command->list_elem));
		e100_free_non_tx_cmd(bdp, command);
	}

	for (i = 0; i < CB_MAX_NONTX_CMD; i++) {
		bdp->same_cmd_entry[i] = NULL;
	}
}

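/**
 * e100_delayed_exec_non_cu_cmd - queue a non-transmit command
 * @bdp: adapter's private data struct
 * @command: the command block to queue
 *
 * Queued commands are executed by the background timer once the CU is
 * free. If a command of the same type is already pending, the new
 * contents simply overwrite it, so at most one instance of each
 * command type is ever queued.
 */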
static unsigned char
e100_delayed_exec_non_cu_cmd(struct e100_private *bdp,
			     nxmit_cb_entry_t *command)
{
	nxmit_cb_entry_t *same_command;
	cb_header_t *ntcb_hdr;
	u16 cmd;

	ntcb_hdr = (cb_header_t *) command->non_tx_cmd;

	cmd = CB_CMD_MASK & le16_to_cpu(ntcb_hdr->cb_cmd);

	spin_lock_bh(&(bdp->bd_non_tx_lock));

	same_command = bdp->same_cmd_entry[cmd];

	if (same_command != NULL) {
		memcpy((void *) (same_command->non_tx_cmd),
		       (void *) (command->non_tx_cmd), sizeof (nxmit_cb_t));
		e100_free_non_tx_cmd(bdp, command);
	} else {
		list_add_tail(&(command->list_elem), &(bdp->non_tx_cmd_list));
		bdp->same_cmd_entry[cmd] = command;
	}

	if (bdp->non_tx_command_state == E100_NON_TX_IDLE) {
		bdp->non_tx_command_state = E100_WAIT_TX_FINISH;
		mod_timer(&(bdp->nontx_timer_id), jiffies + 1);
	}

	spin_unlock_bh(&(bdp->bd_non_tx_lock));
	return true;
}

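/**
 * e100_non_tx_background - non-transmit command state machine
 * @ptr: adapter's private data struct (timer callback argument)
 *
 * Timer-driven state machine that waits for the transmit unit, and
 * then for each queued command, to finish before starting the next
 * queued command on the CU. A command that has not completed within
 * one second is abandoned. Once the queue drains, the transmit queue
 * is woken again.
 */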
static void
e100_non_tx_background(unsigned long ptr)
{
	struct e100_private *bdp = (struct e100_private *) ptr;
	nxmit_cb_entry_t *active_command;
	int restart = true;
	cb_header_t *non_tx_cmd;
	u8 sub_cmd;

	spin_lock_bh(&(bdp->bd_non_tx_lock));

	switch (bdp->non_tx_command_state) {
	case E100_WAIT_TX_FINISH:
		if (bdp->last_tcb != NULL) {
			rmb();
			if ((bdp->last_tcb->tcb_hdr.cb_status &
			     __constant_cpu_to_le16(CB_STATUS_COMPLETE)) == 0)
				goto exit;
		}
		if ((readw(&bdp->scb->scb_status) & SCB_CUS_MASK) ==
		    SCB_CUS_ACTIVE) {
			goto exit;
		}
		break;

	case E100_WAIT_NON_TX_FINISH:
		active_command = list_entry(bdp->non_tx_cmd_list.next,
					    nxmit_cb_entry_t, list_elem);
		rmb();

		if (((((cb_header_t *) (active_command->non_tx_cmd))->cb_status
		      & __constant_cpu_to_le16(CB_STATUS_COMPLETE)) == 0)
		    && time_before(jiffies, active_command->expiration_time)) {
			goto exit;
		} else {
			non_tx_cmd = (cb_header_t *) active_command->non_tx_cmd;
			sub_cmd = CB_CMD_MASK & le16_to_cpu(non_tx_cmd->cb_cmd);
#ifdef E100_CU_DEBUG
			if (!(non_tx_cmd->cb_status
			      & __constant_cpu_to_le16(CB_STATUS_COMPLETE)))
				printk(KERN_ERR "e100: %s: Queued "
				       "command (%x) timeout\n",
				       bdp->device->name, sub_cmd);
#endif
			list_del(&(active_command->list_elem));
			e100_free_non_tx_cmd(bdp, active_command);
		}
		break;

	default:
		break;
	}

	if (list_empty(&bdp->non_tx_cmd_list)) {
		bdp->non_tx_command_state = E100_NON_TX_IDLE;
		spin_lock_irq(&(bdp->bd_lock));
		bdp->next_cu_cmd = START_WAIT;
		spin_unlock_irq(&(bdp->bd_lock));
		restart = false;
		goto exit;
	} else {
		u16 cmd_type;

		bdp->non_tx_command_state = E100_WAIT_NON_TX_FINISH;
		active_command = list_entry(bdp->non_tx_cmd_list.next,
					    nxmit_cb_entry_t, list_elem);
		sub_cmd = ((cb_header_t *) active_command->non_tx_cmd)->cb_cmd;
		spin_lock_irq(&(bdp->bd_lock));
		e100_wait_exec_cmplx(bdp, active_command->dma_addr,
				     SCB_CUC_START, sub_cmd);
		spin_unlock_irq(&(bdp->bd_lock));
		active_command->expiration_time = jiffies + HZ;
		cmd_type = CB_CMD_MASK &
			le16_to_cpu(((cb_header_t *)
				     (active_command->non_tx_cmd))->cb_cmd);
		bdp->same_cmd_entry[cmd_type] = NULL;
	}

exit:
	if (restart) {
		mod_timer(&(bdp->nontx_timer_id), jiffies + 1);
	} else {
		if (netif_running(bdp->device))
			netif_wake_queue(bdp->device);
	}
	spin_unlock_bh(&(bdp->bd_non_tx_lock));
}

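/**
 * e100_vlan_rx_register
 * @netdev: pointer to adapter's net_device struct
 * @grp: VLAN group, or NULL to disable VLAN handling
 *
 * Enables or disables VLAN tag insert/strip in the adapter's
 * configuration, with interrupts masked while the change is applied.
 */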
static void
e100_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct e100_private *bdp = netdev->priv;

	e100_disable_clear_intr(bdp);
	bdp->vlgrp = grp;

	if(grp) {
		/* enable VLAN tag insert/strip */
		e100_config_vlan_drop(bdp, true);
	} else {
		/* disable VLAN tag insert/strip */
		e100_config_vlan_drop(bdp, false);
	}

	e100_config(bdp);
	e100_set_intr_mask(bdp);
}

static void
e100_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	/* We don't do VLAN filtering */
	return;
}

static void
e100_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e100_private *bdp = netdev->priv;

	if(bdp->vlgrp)
		bdp->vlgrp->vlan_devices[vid] = NULL;
	/* We don't do VLAN filtering */
	return;
}

#ifdef CONFIG_PM
static int
e100_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
{
	struct pci_dev *pdev = NULL;

	switch(event) {
	case SYS_DOWN:
	case SYS_HALT:
	case SYS_POWER_OFF:
		while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
			if(pci_dev_driver(pdev) == &e100_driver) {
				/* Suspend only if a net_device is attached */
				if (pci_get_drvdata(pdev))
					e100_suspend(pdev, 3);
			}
		}
	}
	return NOTIFY_DONE;
}

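/**
 * e100_suspend
 * @pcid: adapter's pci_dev struct
 * @state: requested PM state
 *
 * Isolates the driver, saves PCI state and arms wake-on-LAN. If WOL or
 * ASF is active, the device is left in D3 with PME enabled; otherwise
 * it is disabled and put in the requested power state.
 */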
static int
e100_suspend(struct pci_dev *pcid, u32 state)
{
	struct net_device *netdev = pci_get_drvdata(pcid);
	struct e100_private *bdp = netdev->priv;

	e100_isolate_driver(bdp);
	pci_save_state(pcid, bdp->pci_state);

	/* Enable or disable WoL */
	e100_do_wol(pcid, bdp);

	/* If wol or ASF is enabled, the device must stay able to wake us */
	if (bdp->wolopts || e100_asf_enabled(bdp)) {
		pci_enable_wake(pcid, 3, 1);	/* Enable PME for power state D3 */
		pci_set_power_state(pcid, 3);	/* Set power state to D3 */
	} else {
		/* Disable bus mastering */
		pci_disable_device(pcid);
		pci_set_power_state(pcid, state);
	}
	return 0;
}

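/**
 * e100_resume
 * @pcid: adapter's pci_dev struct
 *
 * Restores the power state and PCI configuration, then performs a full
 * device reset on the way back from D3.
 */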
static int
e100_resume(struct pci_dev *pcid)
{
	struct net_device *netdev = pci_get_drvdata(pcid);
	struct e100_private *bdp = netdev->priv;

	pci_set_power_state(pcid, 0);
	pci_enable_wake(pcid, 0, 0);	/* Clear PME status and disable PME */
	pci_restore_state(pcid, bdp->pci_state);

	/* Also do a full device reset because the device was in D3 state */
	e100_deisolate_driver(bdp, true);

	return 0;
}

4345
4346 /**
4347 * e100_asf_enabled - checks if ASF is configured on the current adaper
4348 * by reading registers 0xD and 0x90 in the EEPROM
4349 * @bdp: atapter's private data struct
4350 *
4351 * Returns: true if ASF is enabled
4352 */
4353 static unsigned char
e100_asf_enabled(struct e100_private * bdp)4354 e100_asf_enabled(struct e100_private *bdp)
4355 {
4356 u16 asf_reg;
4357 u16 smbus_addr_reg;
4358 if ((bdp->pdev->device >= 0x1050) && (bdp->pdev->device <= 0x1055)) {
4359 asf_reg = e100_eeprom_read(bdp, EEPROM_CONFIG_ASF);
4360 if ((asf_reg & EEPROM_FLAG_ASF)
4361 && !(asf_reg & EEPROM_FLAG_GCL)) {
4362 smbus_addr_reg =
4363 e100_eeprom_read(bdp, EEPROM_SMBUS_ADDR);
4364 if ((smbus_addr_reg & 0xFF) != 0xFE)
4365 return true;
4366 }
4367 }
4368 return false;
4369 }
4370 #endif /* CONFIG_PM */
4371
4372 #ifdef E100_CU_DEBUG
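/**
 * e100_cu_unknown_state - debug check for a wedged command unit
 * @bdp: adapter's private data struct
 *
 * Returns true if the CU is active but executing an unknown command.
 */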
unsigned char
e100_cu_unknown_state(struct e100_private *bdp)
{
	u8 scb_cmd_low;
	u16 scb_status;

	scb_cmd_low = bdp->scb->scb_cmd_low;
	scb_status = le16_to_cpu(bdp->scb->scb_status);
	/* If CU is active and executing an unknown cmd */
	if (scb_status & SCB_CUS_ACTIVE && scb_cmd_low & SCB_CUC_UNKNOWN)
		return true;
	else
		return false;
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e100_netpoll(struct net_device *dev)
{
	struct e100_private *adapter = dev->priv;

	disable_irq(adapter->pdev->irq);
	e100intr(adapter->pdev->irq, dev, NULL);
	enable_irq(adapter->pdev->irq);
}
#endif