/* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
 * Copyright (C) 2004 Advanced Micro Devices
 *
 *
 * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
 * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
 * Copyright 1993 United States Government as represented by the
 *	Director, National Security Agency.[ pcnet32.c ]
 * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA

Module Name:

	amd8111e.c

Abstract:

	AMD8111 based 10/100 Ethernet Controller Driver.

Environment:

	Kernel Mode

Revision History:
	3.0.0
	   Initial Revision.
	3.0.1
	 1. Dynamic interrupt coalescing.
	 2. Removed prev_stats.
	 3. MII support.
	 4. Dynamic IPG support
	3.0.2  05/29/2003
	 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
	 2. Bug fix: Fixed VLAN support failure.
	 3. Bug fix: Fixed receive interrupt coalescing bug.
	 4. Dynamic IPG support is disabled by default.
	3.0.3 06/05/2003
	 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
	3.0.4 12/09/2003
	 1. Added set_mac_address routine for bonding driver support.
	 2. Tested the driver for bonding support
	 3. Bug fix: Fixed mismatch in actual receive buffer length and length
	    indicated to the hardware.
	 4. Modified amd8111e_rx() routine to receive all the received packets
	    in the first interrupt.
	 5. Bug fix: Corrected rx_errors reported in get_stats() function.

*/
68
69
70 #include <linux/config.h>
71 #include <linux/module.h>
72 #include <linux/kernel.h>
73 #include <linux/types.h>
74 #include <linux/compiler.h>
75 #include <linux/slab.h>
76 #include <linux/delay.h>
77 #include <linux/init.h>
78 #include <linux/ioport.h>
79 #include <linux/pci.h>
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/ethtool.h>
84 #include <linux/mii.h>
85 #include <linux/if_vlan.h>
86 #include <linux/ctype.h>
87 #include <linux/crc32.h>
88
89 #include <asm/system.h>
90 #include <asm/io.h>
91 #include <asm/byteorder.h>
92 #include <asm/uaccess.h>
93
94 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
95 #define AMD8111E_VLAN_TAG_USED 1
96 #else
97 #define AMD8111E_VLAN_TAG_USED 0
98 #endif
99
100 #include "amd8111e.h"
101 #define MODULE_NAME "amd8111e"
102 #define MODULE_VERSION "3.0.4"
103 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
104 MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version 3.0.4");
105 MODULE_LICENSE("GPL");
106 MODULE_PARM(speed_duplex, "1-" __MODULE_STRING (MAX_UNITS) "i");
107 MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotitate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
108 MODULE_PARM(coalesce, "1-" __MODULE_STRING(MAX_UNITS) "i");
109 MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
110 MODULE_PARM(dynamic_ipg, "1-" __MODULE_STRING(MAX_UNITS) "i");
111 MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
112
113 static struct pci_device_id amd8111e_pci_tbl[] __devinitdata = {
114
115 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
117 { 0, }
118
119 };
120 /*
121 This function will read the PHY registers.
122 */
amd8111e_read_phy(struct amd8111e_priv * lp,int phy_id,int reg,u32 * val)123 static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
124 {
125 void * mmio = lp->mmio;
126 unsigned int reg_val;
127 unsigned int repeat= REPEAT_CNT;
128
129 reg_val = readl(mmio + PHY_ACCESS);
130 while (reg_val & PHY_CMD_ACTIVE)
131 reg_val = readl( mmio + PHY_ACCESS );
132
133 writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
134 ((reg & 0x1f) << 16), mmio +PHY_ACCESS);
135 do{
136 reg_val = readl(mmio + PHY_ACCESS);
137 udelay(30); /* It takes 30 us to read/write data */
138 } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
139 if(reg_val & PHY_RD_ERR)
140 goto err_phy_read;
141
142 *val = reg_val & 0xffff;
143 return 0;
144 err_phy_read:
145 *val = 0;
146 return -EINVAL;
147
148 }
149
150 /*
151 This function will write into PHY registers.
152 */
amd8111e_write_phy(struct amd8111e_priv * lp,int phy_id,int reg,u32 val)153 static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
154 {
155 unsigned int repeat = REPEAT_CNT
156 void * mmio = lp->mmio;
157 unsigned int reg_val;
158
159 reg_val = readl(mmio + PHY_ACCESS);
160 while (reg_val & PHY_CMD_ACTIVE)
161 reg_val = readl( mmio + PHY_ACCESS );
162
163 writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
164 ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
165
166 do{
167 reg_val = readl(mmio + PHY_ACCESS);
168 udelay(30); /* It takes 30 us to read/write the data */
169 } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
170
171 if(reg_val & PHY_RD_ERR)
172 goto err_phy_write;
173
174 return 0;
175
176 err_phy_write:
177 return -EINVAL;
178
179 }
180 /*
181 This is the mii register read function provided to the mii interface.
182 */
amd8111e_mdio_read(struct net_device * dev,int phy_id,int reg_num)183 static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
184 {
185 struct amd8111e_priv* lp = dev->priv;
186 unsigned int reg_val;
187
188 amd8111e_read_phy(lp,phy_id,reg_num,®_val);
189 return reg_val;
190
191 }
192
193 /*
194 This is the mii register write function provided to the mii interface.
195 */
amd8111e_mdio_write(struct net_device * dev,int phy_id,int reg_num,int val)196 static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
197 {
198 struct amd8111e_priv* lp = dev->priv;
199
200 amd8111e_write_phy(lp, phy_id, reg_num, val);
201 }
202
203 /*
204 This function will set PHY speed. During initialization sets the original speed to 100 full.
205 */
amd8111e_set_ext_phy(struct net_device * dev)206 static void amd8111e_set_ext_phy(struct net_device *dev)
207 {
208 struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;
209 u32 bmcr,advert,tmp;
210
211 /* Determine mii register values to set the speed */
212 advert = amd8111e_mdio_read(dev, PHY_ID, MII_ADVERTISE);
213 tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
214 switch (lp->ext_phy_option){
215
216 default:
217 case SPEED_AUTONEG: /* advertise all values */
218 tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
219 ADVERTISE_100HALF|ADVERTISE_100FULL) ;
220 break;
221 case SPEED10_HALF:
222 tmp |= ADVERTISE_10HALF;
223 break;
224 case SPEED10_FULL:
225 tmp |= ADVERTISE_10FULL;
226 break;
227 case SPEED100_HALF:
228 tmp |= ADVERTISE_100HALF;
229 break;
230 case SPEED100_FULL:
231 tmp |= ADVERTISE_100FULL;
232 break;
233 }
234
235 if(advert != tmp)
236 amd8111e_mdio_write(dev, PHY_ID, MII_ADVERTISE, tmp);
237 /* Restart auto negotiation */
238 bmcr = amd8111e_mdio_read(dev, PHY_ID, MII_BMCR);
239 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
240 amd8111e_mdio_write(dev, PHY_ID, MII_BMCR, bmcr);
241
242 }
243
244 /*
245 This function will unmap skb->data space and will free
246 all transmit and receive skbuffs.
247 */
amd8111e_free_skbs(struct net_device * dev)248 static int amd8111e_free_skbs(struct net_device *dev)
249 {
250 struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;
251 struct sk_buff* rx_skbuff;
252 int i;
253
254 /* Freeing transmit skbs */
255 for(i = 0; i < NUM_TX_BUFFERS; i++){
256 if(lp->tx_skbuff[i]){
257 pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i], lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE);
258 dev_kfree_skb (lp->tx_skbuff[i]);
259 lp->tx_skbuff[i] = NULL;
260 lp->tx_dma_addr[i] = 0;
261 }
262 }
263 /* Freeing previously allocated receive buffers */
264 for (i = 0; i < NUM_RX_BUFFERS; i++){
265 rx_skbuff = lp->rx_skbuff[i];
266 if(rx_skbuff != NULL){
267 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
268 lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
269 dev_kfree_skb(lp->rx_skbuff[i]);
270 lp->rx_skbuff[i] = NULL;
271 lp->rx_dma_addr[i] = 0;
272 }
273 }
274
275 return 0;
276 }
277
278 /*
279 This will set the receive buffer length corresponding to the mtu size of networkinterface.
280 */
amd8111e_set_rx_buff_len(struct net_device * dev)281 static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
282 {
283 struct amd8111e_priv* lp = dev->priv;
284 unsigned int mtu = dev->mtu;
285
286 if (mtu > ETH_DATA_LEN){
287 /* MTU + ethernet header + FCS
288 + optional VLAN tag + skb reserve space 2 */
289
290 lp->rx_buff_len = mtu + ETH_HLEN + 10;
291 lp->options |= OPTION_JUMBO_ENABLE;
292 } else{
293 lp->rx_buff_len = PKT_BUFF_SZ;
294 lp->options &= ~OPTION_JUMBO_ENABLE;
295 }
296 }
297
298 /*
299 This function will free all the previously allocated buffers, determine new receive buffer length and will allocate new receive buffers. This function also allocates and initializes both the transmitter and receive hardware descriptors.
300 */
amd8111e_init_ring(struct net_device * dev)301 static int amd8111e_init_ring(struct net_device *dev)
302 {
303 struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;
304 int i;
305
306 lp->rx_idx = lp->tx_idx = 0;
307 lp->tx_complete_idx = 0;
308 lp->tx_ring_idx = 0;
309
310
311 if(lp->opened)
312 /* Free previously allocated transmit and receive skbs */
313 amd8111e_free_skbs(dev);
314
315 else{
316 /* allocate the tx and rx descriptors */
317 if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
318 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
319 &lp->tx_ring_dma_addr)) == NULL)
320
321 goto err_no_mem;
322
323 if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
324 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
325 &lp->rx_ring_dma_addr)) == NULL)
326
327 goto err_free_tx_ring;
328
329 }
330 /* Set new receive buff size */
331 amd8111e_set_rx_buff_len(dev);
332
333 /* Allocating receive skbs */
334 for (i = 0; i < NUM_RX_BUFFERS; i++) {
335
336 if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
337 /* Release previos allocated skbs */
338 for(--i; i >= 0 ;i--)
339 dev_kfree_skb(lp->rx_skbuff[i]);
340 goto err_free_rx_ring;
341 }
342 skb_reserve(lp->rx_skbuff[i],2);
343 }
344 /* Initilaizing receive descriptors */
345 for (i = 0; i < NUM_RX_BUFFERS; i++) {
346 lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
347 lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
348
349 lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
350 lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
351 lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
352 }
353
354 /* Initializing transmit descriptors */
355 for (i = 0; i < NUM_TX_RING_DR; i++) {
356 lp->tx_ring[i].buff_phy_addr = 0;
357 lp->tx_ring[i].tx_flags = 0;
358 lp->tx_ring[i].buff_count = 0;
359 }
360
361 return 0;
362
363 err_free_rx_ring:
364
365 pci_free_consistent(lp->pci_dev,
366 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
367 lp->rx_ring_dma_addr);
368
369 err_free_tx_ring:
370
371 pci_free_consistent(lp->pci_dev,
372 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
373 lp->tx_ring_dma_addr);
374
375 err_no_mem:
376 return -ENOMEM;
377 }
378 /* This function will set the interrupt coalescing according to the input arguments */
amd8111e_set_coalesce(struct net_device * dev,enum coal_mode cmod)379 static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
380 {
381 unsigned int timeout;
382 unsigned int event_count;
383
384 struct amd8111e_priv *lp = dev->priv;
385 void* mmio = lp->mmio;
386 struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
387
388
389 switch(cmod)
390 {
391 case RX_INTR_COAL :
392 timeout = coal_conf->rx_timeout;
393 event_count = coal_conf->rx_event_count;
394 if( timeout > MAX_TIMEOUT ||
395 event_count > MAX_EVENT_COUNT )
396 return -EINVAL;
397
398 timeout = timeout * DELAY_TIMER_CONV;
399 writel(VAL0|STINTEN, mmio+INTEN0);
400 writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
401 mmio+DLY_INT_A);
402 break;
403
404 case TX_INTR_COAL :
405 timeout = coal_conf->tx_timeout;
406 event_count = coal_conf->tx_event_count;
407 if( timeout > MAX_TIMEOUT ||
408 event_count > MAX_EVENT_COUNT )
409 return -EINVAL;
410
411
412 timeout = timeout * DELAY_TIMER_CONV;
413 writel(VAL0|STINTEN,mmio+INTEN0);
414 writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
415 mmio+DLY_INT_B);
416 break;
417
418 case DISABLE_COAL:
419 writel(0,mmio+STVAL);
420 writel(STINTEN, mmio+INTEN0);
421 writel(0, mmio +DLY_INT_B);
422 writel(0, mmio+DLY_INT_A);
423 break;
424 case ENABLE_COAL:
425 /* Start the timer */
426 writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */
427 writel(VAL0|STINTEN, mmio+INTEN0);
428 break;
429 default:
430 break;
431
432 }
433 return 0;
434
435 }
436
437 /*
438 This function initializes the device registers and starts the device.
439 */
amd8111e_restart(struct net_device * dev)440 static int amd8111e_restart(struct net_device *dev)
441 {
442 struct amd8111e_priv *lp = (struct amd8111e_priv* )dev->priv;
443 void * mmio = lp->mmio;
444 int i,reg_val;
445
446 /* stop the chip */
447 writel(RUN, mmio + CMD0);
448
449 if(amd8111e_init_ring(dev))
450 return -ENOMEM;
451
452 /* enable the port manager and set auto negotiation always */
453 writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
454 writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
455
456 amd8111e_set_ext_phy(dev);
457
458 /* set control registers */
459 reg_val = readl(mmio + CTRL1);
460 reg_val &= ~XMTSP_MASK;
461 writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
462
463 /* enable interrupt */
464 writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
465 APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
466 SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
467
468 writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
469
470 /* initialize tx and rx ring base addresses */
471 writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
472 writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
473
474 writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
475 writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
476
477 /* set default IPG to 96 */
478 writew((u32)DEFAULT_IPG,mmio+IPG);
479 writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
480
481 if(lp->options & OPTION_JUMBO_ENABLE){
482 writel((u32)VAL2|JUMBO, mmio + CMD3);
483 /* Reset REX_UFLO */
484 writel( REX_UFLO, mmio + CMD2);
485 /* Should not set REX_UFLO for jumbo frames */
486 writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
487 }else{
488 writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
489 writel((u32)JUMBO, mmio + CMD3);
490 }
491
492 #if AMD8111E_VLAN_TAG_USED
493 writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
494 #endif
495 writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
496
497 /* Setting the MAC address to the device */
498 for(i = 0; i < ETH_ADDR_LEN; i++)
499 writeb( dev->dev_addr[i], mmio + PADR + i );
500
501 /* Enable interrupt coalesce */
502 if(lp->options & OPTION_INTR_COAL_ENABLE){
503 printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
504 dev->name);
505 amd8111e_set_coalesce(dev,ENABLE_COAL);
506 }
507
508 /* set RUN bit to start the chip */
509 writel(VAL2 | RDMD0, mmio + CMD0);
510 writel(VAL0 | INTREN | RUN, mmio + CMD0);
511
512 /* To avoid PCI posting bug */
513 readl(mmio+CMD0);
514 return 0;
515 }
516 /*
517 This function clears necessary the device registers.
518 */
amd8111e_init_hw_default(struct amd8111e_priv * lp)519 static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
520 {
521 unsigned int reg_val;
522 unsigned int logic_filter[2] ={0,};
523 void * mmio = lp->mmio;
524
525
526 /* stop the chip */
527 writel(RUN, mmio + CMD0);
528
529 /* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
530 writew( 0x8101, mmio + AUTOPOLL0);
531
532 /* Clear RCV_RING_BASE_ADDR */
533 writel(0, mmio + RCV_RING_BASE_ADDR0);
534
535 /* Clear XMT_RING_BASE_ADDR */
536 writel(0, mmio + XMT_RING_BASE_ADDR0);
537 writel(0, mmio + XMT_RING_BASE_ADDR1);
538 writel(0, mmio + XMT_RING_BASE_ADDR2);
539 writel(0, mmio + XMT_RING_BASE_ADDR3);
540
541 /* Clear CMD0 */
542 writel(CMD0_CLEAR,mmio + CMD0);
543
544 /* Clear CMD2 */
545 writel(CMD2_CLEAR, mmio +CMD2);
546
547 /* Clear CMD7 */
548 writel(CMD7_CLEAR , mmio + CMD7);
549
550 /* Clear DLY_INT_A and DLY_INT_B */
551 writel(0x0, mmio + DLY_INT_A);
552 writel(0x0, mmio + DLY_INT_B);
553
554 /* Clear FLOW_CONTROL */
555 writel(0x0, mmio + FLOW_CONTROL);
556
557 /* Clear INT0 write 1 to clear register */
558 reg_val = readl(mmio + INT0);
559 writel(reg_val, mmio + INT0);
560
561 /* Clear STVAL */
562 writel(0x0, mmio + STVAL);
563
564 /* Clear INTEN0 */
565 writel( INTEN0_CLEAR, mmio + INTEN0);
566
567 /* Clear LADRF */
568 writel(0x0 , mmio + LADRF);
569
570 /* Set SRAM_SIZE & SRAM_BOUNDARY registers */
571 writel( 0x80010,mmio + SRAM_SIZE);
572
573 /* Clear RCV_RING0_LEN */
574 writel(0x0, mmio + RCV_RING_LEN0);
575
576 /* Clear XMT_RING0/1/2/3_LEN */
577 writel(0x0, mmio + XMT_RING_LEN0);
578 writel(0x0, mmio + XMT_RING_LEN1);
579 writel(0x0, mmio + XMT_RING_LEN2);
580 writel(0x0, mmio + XMT_RING_LEN3);
581
582 /* Clear XMT_RING_LIMIT */
583 writel(0x0, mmio + XMT_RING_LIMIT);
584
585 /* Clear MIB */
586 writew(MIB_CLEAR, mmio + MIB_ADDR);
587
588 /* Clear LARF */
589 amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
590
591 /* SRAM_SIZE register */
592 reg_val = readl(mmio + SRAM_SIZE);
593
594 if(lp->options & OPTION_JUMBO_ENABLE)
595 writel( VAL2|JUMBO, mmio + CMD3);
596 #if AMD8111E_VLAN_TAG_USED
597 writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
598 #endif
599 /* Set default value to CTRL1 Register */
600 writel(CTRL1_DEFAULT, mmio + CTRL1);
601
602 /* To avoid PCI posting bug */
603 readl(mmio + CMD2);
604
605 }
606
607 /*
608 This function disables the interrupt and clears all the pending
609 interrupts in INT0
610 */
amd8111e_disable_interrupt(struct amd8111e_priv * lp)611 static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
612 {
613 u32 intr0;
614
615 /* Disable interrupt */
616 writel(INTREN, lp->mmio + CMD0);
617
618 /* Clear INT0 */
619 intr0 = readl(lp->mmio + INT0);
620 writel(intr0, lp->mmio + INT0);
621
622 /* To avoid PCI posting bug */
623 readl(lp->mmio + INT0);
624
625 }
626
627 /*
628 This function stops the chip.
629 */
amd8111e_stop_chip(struct amd8111e_priv * lp)630 static void amd8111e_stop_chip(struct amd8111e_priv* lp)
631 {
632 writel(RUN, lp->mmio + CMD0);
633
634 /* To avoid PCI posting bug */
635 readl(lp->mmio + CMD0);
636 }
637
638 /*
639 This function frees the transmiter and receiver descriptor rings.
640 */
amd8111e_free_ring(struct amd8111e_priv * lp)641 static void amd8111e_free_ring(struct amd8111e_priv* lp)
642 {
643
644 /* Free transmit and receive skbs */
645 amd8111e_free_skbs(lp->amd8111e_net_dev);
646
647 /* Free transmit and receive descriptor rings */
648 if(lp->rx_ring){
649 pci_free_consistent(lp->pci_dev,
650 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
651 lp->rx_ring, lp->rx_ring_dma_addr);
652 lp->rx_ring = NULL;
653 }
654
655 if(lp->tx_ring){
656 pci_free_consistent(lp->pci_dev,
657 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
658 lp->tx_ring, lp->tx_ring_dma_addr);
659
660 lp->tx_ring = NULL;
661 }
662
663 }
#if AMD8111E_VLAN_TAG_USED
/*
 * Receive indication for packets carrying a hardware-stripped VLAN tag;
 * hands the skb to the VLAN layer with the extracted tag.
 */
static int amd8111e_vlan_rx(struct amd8111e_priv *lp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_rx(skb, lp->vlgrp, vlan_tag);
}
#endif
673
674 /*
675 This function will free all the transmit skbs that are actually transmitted by the device. It will check the ownership of the skb before freeing the skb.
676 */
amd8111e_tx(struct net_device * dev)677 static int amd8111e_tx(struct net_device *dev)
678 {
679 struct amd8111e_priv* lp = dev->priv;
680 int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
681 int status;
682 /* Complete all the transmit packet */
683 while (lp->tx_complete_idx != lp->tx_idx){
684 tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
685 status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
686
687 if(status & OWN_BIT)
688 break; /* It still hasn't been Txed */
689
690 lp->tx_ring[tx_index].buff_phy_addr = 0;
691
692 /* We must free the original skb */
693 if (lp->tx_skbuff[tx_index]) {
694 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
695 lp->tx_skbuff[tx_index]->len,
696 PCI_DMA_TODEVICE);
697 dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
698 lp->tx_skbuff[tx_index] = 0;
699 lp->tx_dma_addr[tx_index] = 0;
700 }
701 lp->tx_complete_idx++;
702 /*COAL update tx coalescing parameters */
703 lp->coal_conf.tx_packets++;
704 lp->coal_conf.tx_bytes += lp->tx_ring[tx_index].buff_count;
705
706 if (netif_queue_stopped(dev) &&
707 lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
708 /* The ring is no longer full, clear tbusy. */
709 /* lp->tx_full = 0; */
710 netif_wake_queue (dev);
711 }
712 }
713 return 0;
714 }
715
716 /*
717 This function will check the ownership of receive buffers and descriptors. It will indicate to kernel up to half the number of maximum receive buffers in the descriptor ring, in a single receive interrupt. It will also replenish the descriptors with new skbs.
718 */
amd8111e_rx(struct net_device * dev)719 static int amd8111e_rx(struct net_device *dev)
720 {
721 struct amd8111e_priv *lp = dev->priv;
722 struct sk_buff *skb,*new_skb;
723 int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
724 int min_pkt_len, status;
725 int num_rx_pkt = 0;
726 int max_rx_pkt = NUM_RX_BUFFERS;
727 short pkt_len;
728 #if AMD8111E_VLAN_TAG_USED
729 short vtag;
730 #endif
731
732 /* If we own the next entry, it's a new packet. Send it up. */
733 while(++num_rx_pkt <= max_rx_pkt){
734 if(lp->rx_ring[rx_index].rx_flags & OWN_BIT)
735 return 0;
736
737 /* check if err summary bit is set */
738 if(le16_to_cpu(lp->rx_ring[rx_index].rx_flags) & ERR_BIT){
739 /*
740 * There is a tricky error noted by John Murphy,
741 * <murf@perftech.com> to Russ Nelson: Even with full-sized
742 * buffers it's possible for a jabber packet to use two
743 * buffers, with only the last correctly noting the error. */
744 /* reseting flags */
745 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
746 goto err_next_pkt;
747 }
748 /* check for STP and ENP */
749 status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
750 if(!((status & STP_BIT) && (status & ENP_BIT))){
751 /* reseting flags */
752 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
753 goto err_next_pkt;
754 }
755 pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
756
757 #if AMD8111E_VLAN_TAG_USED
758 vtag = le16_to_cpu(lp->rx_ring[rx_index].rx_flags) & TT_MASK;
759 /*MAC will strip vlan tag*/
760 if(lp->vlgrp != NULL && vtag !=0)
761 min_pkt_len =MIN_PKT_LEN - 4;
762 else
763 #endif
764 min_pkt_len =MIN_PKT_LEN;
765
766 if (pkt_len < min_pkt_len) {
767 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
768 lp->drv_rx_errors++;
769 goto err_next_pkt;
770 }
771 if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
772 /* if allocation fail,
773 ignore that pkt and go to next one */
774 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
775 lp->drv_rx_errors++;
776 goto err_next_pkt;
777 }
778
779 skb_reserve(new_skb, 2);
780 skb = lp->rx_skbuff[rx_index];
781 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
782 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
783 skb_put(skb, pkt_len);
784 skb->dev = dev;
785 lp->rx_skbuff[rx_index] = new_skb;
786 new_skb->dev = dev;
787 lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
788 new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE);
789
790 skb->protocol = eth_type_trans(skb, dev);
791
792 #if AMD8111E_VLAN_TAG_USED
793
794 vtag = lp->rx_ring[rx_index].rx_flags & TT_MASK;
795 if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
796 amd8111e_vlan_rx(lp, skb,
797 lp->rx_ring[rx_index].tag_ctrl_info);
798 } else
799 #endif
800
801 netif_rx (skb);
802 /*COAL update rx coalescing parameters*/
803 lp->coal_conf.rx_packets++;
804 lp->coal_conf.rx_bytes += pkt_len;
805
806 dev->last_rx = jiffies;
807
808 err_next_pkt:
809 lp->rx_ring[rx_index].buff_phy_addr
810 = cpu_to_le32(lp->rx_dma_addr[rx_index]);
811 lp->rx_ring[rx_index].buff_count =
812 cpu_to_le16(lp->rx_buff_len-2);
813 lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
814 rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
815 }
816
817 return 0;
818 }
819
820 /*
821 This function will indicate the link status to the kernel.
822 */
amd8111e_link_change(struct net_device * dev)823 static int amd8111e_link_change(struct net_device* dev)
824 {
825 struct amd8111e_priv *lp = dev->priv;
826 int status0,speed;
827
828 /* read the link change */
829 status0 = readl(lp->mmio + STAT0);
830
831 if(status0 & LINK_STATS){
832 if(status0 & AUTONEG_COMPLETE)
833 lp->link_config.autoneg = AUTONEG_ENABLE;
834 else
835 lp->link_config.autoneg = AUTONEG_DISABLE;
836
837 if(status0 & FULL_DPLX)
838 lp->link_config.duplex = DUPLEX_FULL;
839 else
840 lp->link_config.duplex = DUPLEX_HALF;
841 speed = (status0 & SPEED_MASK) >> 7;
842 if(speed == PHY_SPEED_10)
843 lp->link_config.speed = SPEED_10;
844 else if(speed == PHY_SPEED_100)
845 lp->link_config.speed = SPEED_100;
846
847 printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name,
848 (lp->link_config.speed == SPEED_100) ? "100": "10",
849 (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
850 netif_carrier_on(dev);
851 }
852 else{
853 lp->link_config.speed = SPEED_INVALID;
854 lp->link_config.duplex = DUPLEX_INVALID;
855 lp->link_config.autoneg = AUTONEG_INVALID;
856 printk(KERN_INFO "%s: Link is Down.\n",dev->name);
857 netif_carrier_off(dev);
858 }
859
860 return 0;
861 }
862 /*
863 This function reads the mib counters.
864 */
amd8111e_read_mib(void * mmio,u8 MIB_COUNTER)865 static int amd8111e_read_mib(void* mmio, u8 MIB_COUNTER)
866 {
867 unsigned int status;
868 unsigned int data;
869 unsigned int repeat = REPEAT_CNT;
870
871 writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
872 do {
873 status = readw(mmio + MIB_ADDR);
874 udelay(2); /* controller takes MAX 2 us to get mib data */
875 }
876 while (--repeat && (status & MIB_CMD_ACTIVE));
877
878 data = readl(mmio + MIB_DATA);
879 return data;
880 }
881
882 /*
883 This function reads the mib registers and returns the hardware statistics. It updates previous internal driver statistics with new values.
884 */
amd8111e_get_stats(struct net_device * dev)885 static struct net_device_stats *amd8111e_get_stats(struct net_device * dev)
886 {
887 struct amd8111e_priv *lp = dev->priv;
888 void * mmio = lp->mmio;
889 unsigned long flags;
890 /* struct net_device_stats *prev_stats = &lp->prev_stats; */
891 struct net_device_stats* new_stats = &lp->stats;
892
893 if(!lp->opened)
894 return &lp->stats;
895 spin_lock_irqsave (&lp->lock, flags);
896
897 /* stats.rx_packets */
898 new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
899 amd8111e_read_mib(mmio, rcv_multicast_pkts)+
900 amd8111e_read_mib(mmio, rcv_unicast_pkts);
901
902 /* stats.tx_packets */
903 new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
904
905 /*stats.rx_bytes */
906 new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
907
908 /* stats.tx_bytes */
909 new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
910
911 /* stats.rx_errors */
912 /* hw errors + errors driver reported */
913 new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
914 amd8111e_read_mib(mmio, rcv_fragments)+
915 amd8111e_read_mib(mmio, rcv_jabbers)+
916 amd8111e_read_mib(mmio, rcv_alignment_errors)+
917 amd8111e_read_mib(mmio, rcv_fcs_errors)+
918 amd8111e_read_mib(mmio, rcv_miss_pkts)+
919 lp->drv_rx_errors;
920
921 /* stats.tx_errors */
922 new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
923
924 /* stats.rx_dropped*/
925 new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
926
927 /* stats.tx_dropped*/
928 new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);
929
930 /* stats.multicast*/
931 new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
932
933 /* stats.collisions*/
934 new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
935
936 /* stats.rx_length_errors*/
937 new_stats->rx_length_errors =
938 amd8111e_read_mib(mmio, rcv_undersize_pkts)+
939 amd8111e_read_mib(mmio, rcv_oversize_pkts);
940
941 /* stats.rx_over_errors*/
942 new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
943
944 /* stats.rx_crc_errors*/
945 new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
946
947 /* stats.rx_frame_errors*/
948 new_stats->rx_frame_errors =
949 amd8111e_read_mib(mmio, rcv_alignment_errors);
950
951 /* stats.rx_fifo_errors */
952 new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
953
954 /* stats.rx_missed_errors */
955 new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
956
957 /* stats.tx_aborted_errors*/
958 new_stats->tx_aborted_errors =
959 amd8111e_read_mib(mmio, xmt_excessive_collision);
960
961 /* stats.tx_carrier_errors*/
962 new_stats->tx_carrier_errors =
963 amd8111e_read_mib(mmio, xmt_loss_carrier);
964
965 /* stats.tx_fifo_errors*/
966 new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
967
968 /* stats.tx_window_errors*/
969 new_stats->tx_window_errors =
970 amd8111e_read_mib(mmio, xmt_late_collision);
971
972 /* Reset the mibs for collecting new statistics */
973 /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
974
975 spin_unlock_irqrestore (&lp->lock, flags);
976
977 return new_stats;
978 }
979 /* This function recalculate the interupt coalescing mode on every interrupt
980 according to the datarate and the packet rate.
981 */
amd8111e_calc_coalesce(struct net_device * dev)982 static int amd8111e_calc_coalesce(struct net_device *dev)
983 {
984 struct amd8111e_priv *lp = dev->priv;
985 struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
986 int tx_pkt_rate;
987 int rx_pkt_rate;
988 int tx_data_rate;
989 int rx_data_rate;
990 int rx_pkt_size;
991 int tx_pkt_size;
992
993 tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
994 coal_conf->tx_prev_packets = coal_conf->tx_packets;
995
996 tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
997 coal_conf->tx_prev_bytes = coal_conf->tx_bytes;
998
999 rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
1000 coal_conf->rx_prev_packets = coal_conf->rx_packets;
1001
1002 rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
1003 coal_conf->rx_prev_bytes = coal_conf->rx_bytes;
1004
1005 if(rx_pkt_rate < 800){
1006 if(coal_conf->rx_coal_type != NO_COALESCE){
1007
1008 coal_conf->rx_timeout = 0x0;
1009 coal_conf->rx_event_count = 0;
1010 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1011 coal_conf->rx_coal_type = NO_COALESCE;
1012 }
1013 }
1014 else{
1015
1016 rx_pkt_size = rx_data_rate/rx_pkt_rate;
1017 if (rx_pkt_size < 128){
1018 if(coal_conf->rx_coal_type != NO_COALESCE){
1019
1020 coal_conf->rx_timeout = 0;
1021 coal_conf->rx_event_count = 0;
1022 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1023 coal_conf->rx_coal_type = NO_COALESCE;
1024 }
1025
1026 }
1027 else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
1028
1029 if(coal_conf->rx_coal_type != LOW_COALESCE){
1030 coal_conf->rx_timeout = 1;
1031 coal_conf->rx_event_count = 4;
1032 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1033 coal_conf->rx_coal_type = LOW_COALESCE;
1034 }
1035 }
1036 else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
1037
1038 if(coal_conf->rx_coal_type != MEDIUM_COALESCE){
1039 coal_conf->rx_timeout = 1;
1040 coal_conf->rx_event_count = 4;
1041 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1042 coal_conf->rx_coal_type = MEDIUM_COALESCE;
1043 }
1044
1045 }
1046 else if(rx_pkt_size >= 1024){
1047 if(coal_conf->rx_coal_type != HIGH_COALESCE){
1048 coal_conf->rx_timeout = 2;
1049 coal_conf->rx_event_count = 3;
1050 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1051 coal_conf->rx_coal_type = HIGH_COALESCE;
1052 }
1053 }
1054 }
1055 /* NOW FOR TX INTR COALESC */
1056 if(tx_pkt_rate < 800){
1057 if(coal_conf->tx_coal_type != NO_COALESCE){
1058
1059 coal_conf->tx_timeout = 0x0;
1060 coal_conf->tx_event_count = 0;
1061 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1062 coal_conf->tx_coal_type = NO_COALESCE;
1063 }
1064 }
1065 else{
1066
1067 tx_pkt_size = tx_data_rate/tx_pkt_rate;
1068 if (tx_pkt_size < 128){
1069
1070 if(coal_conf->tx_coal_type != NO_COALESCE){
1071
1072 coal_conf->tx_timeout = 0;
1073 coal_conf->tx_event_count = 0;
1074 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1075 coal_conf->tx_coal_type = NO_COALESCE;
1076 }
1077
1078 }
1079 else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
1080
1081 if(coal_conf->tx_coal_type != LOW_COALESCE){
1082 coal_conf->tx_timeout = 1;
1083 coal_conf->tx_event_count = 2;
1084 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1085 coal_conf->tx_coal_type = LOW_COALESCE;
1086
1087 }
1088 }
1089 else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
1090
1091 if(coal_conf->tx_coal_type != MEDIUM_COALESCE){
1092 coal_conf->tx_timeout = 2;
1093 coal_conf->tx_event_count = 5;
1094 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1095 coal_conf->tx_coal_type = MEDIUM_COALESCE;
1096 }
1097
1098 }
1099 else if(tx_pkt_size >= 1024){
1100 if (tx_pkt_size >= 1024){
1101 if(coal_conf->tx_coal_type != HIGH_COALESCE){
1102 coal_conf->tx_timeout = 4;
1103 coal_conf->tx_event_count = 8;
1104 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1105 coal_conf->tx_coal_type = HIGH_COALESCE;
1106 }
1107 }
1108 }
1109 }
1110 return 0;
1111
1112 }
1113 /*
1114 This is device interrupt function. It handles transmit, receive,link change and hardware timer interrupts.
1115 */
static void amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{

	struct net_device * dev = (struct net_device *) dev_id;
	struct amd8111e_priv *lp = dev->priv;
	void * mmio = lp->mmio;
	unsigned int intr0;

	/* NOTE(review): dev_id was already dereferenced through dev->priv
	 * above, so this NULL check can never fire usefully — confirm
	 * whether it should be hoisted or dropped. */
	if(dev == NULL)
		return;

	/* NOTE(review): the lock is skipped when regs == NULL (a direct,
	 * non-IRQ call); presumably such callers hold lp->lock — verify. */
	if (regs) spin_lock (&lp->lock);
	/* disabling interrupt */
	writel(INTREN, mmio + CMD0);

	/* Read interrupt status */
	intr0 = readl(mmio + INT0);

	/* Process all the INT event until INTR bit is clear. */

	if (!(intr0 & INTR))
		goto err_no_interrupt;

	/* Current driver processes 3 interrupts : RINT,TINT,LCINT */
	/* Acknowledge every pending event by writing the status back. */
	writel(intr0, mmio + INT0);

	/* Check if Receive Interrupt has occurred. */
	if(intr0 & RINT0){
		amd8111e_rx(dev);
		/* Kick receive DMA again after the ring was serviced. */
		writel(VAL2 | RDMD0, mmio + CMD0);
	}

	/* Check if Transmit Interrupt has occurred. */
	if(intr0 & TINT0)
		amd8111e_tx(dev);

	/* Check if Link Change Interrupt has occurred. */
	if (intr0 & LCINT)
		amd8111e_link_change(dev);

	/* Check if Hardware Timer Interrupt has occurred. */
	if (intr0 & STINT)
		amd8111e_calc_coalesce(dev);

err_no_interrupt:
	/* Re-enable device interrupts before returning. */
	writel( VAL0 | INTREN,mmio + CMD0);

	if (regs) spin_unlock(&lp->lock);

}
1166
1167 /*
1168 This function closes the network interface and updates the statistics so that most recent statistics will be available after the interface is down.
1169 */
static int amd8111e_close(struct net_device * dev)
{
	struct amd8111e_priv *lp = dev->priv;
	netif_stop_queue(dev);

	spin_lock_irq(&lp->lock);

	/* Quiesce the hardware, then release the descriptor rings. */
	amd8111e_disable_interrupt(lp);
	amd8111e_stop_chip(lp);
	amd8111e_free_ring(lp);

	netif_carrier_off(lp->amd8111e_net_dev);

	/* Delete ipg timer.
	 * NOTE(review): del_timer_sync() runs with lp->lock held and IRQs
	 * off; this is safe only because amd8111e_config_ipg() never takes
	 * lp->lock — keep that invariant if the timer handler changes. */
	if(lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);

	spin_unlock_irq(&lp->lock);
	free_irq(dev->irq, dev);

	/* Update the statistics before closing */
	amd8111e_get_stats(dev);
	lp->opened = 0;
	return 0;
}
/* This function opens a new interface. It requests the IRQ for the device,
 * initializes the device, buffers and descriptors, and starts the device.
 */
amd8111e_open(struct net_device * dev)1197 static int amd8111e_open(struct net_device * dev )
1198 {
1199 struct amd8111e_priv *lp = (struct amd8111e_priv *)dev->priv;
1200
1201 if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, SA_SHIRQ,
1202 dev->name, dev))
1203 return -EAGAIN;
1204
1205 spin_lock_irq(&lp->lock);
1206
1207 amd8111e_init_hw_default(lp);
1208
1209 if(amd8111e_restart(dev)){
1210 spin_unlock_irq(&lp->lock);
1211 return -ENOMEM;
1212 }
1213 /* Start ipg timer */
1214 if(lp->options & OPTION_DYN_IPG_ENABLE){
1215 add_timer(&lp->ipg_data.ipg_timer);
1216 printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
1217 }
1218
1219 lp->opened = 1;
1220
1221 spin_unlock_irq(&lp->lock);
1222
1223 netif_start_queue(dev);
1224
1225 return 0;
1226 }
1227 /*
1228 This function checks if there is any transmit descriptors available to queue more packet.
1229 */
amd8111e_tx_queue_avail(struct amd8111e_priv * lp)1230 static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
1231 {
1232 int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
1233 if(lp->tx_skbuff[tx_index] != 0)
1234 return -1;
1235 else
1236 return 0;
1237
1238 }
1239 /*
1240 This function will queue the transmit packets to the descriptors and will trigger the send operation. It also initializes the transmit descriptors with buffer physical address, byte count, ownership to hardware etc.
1241 */
1242
/* Queue one packet on the transmit ring and kick the hardware poll. */
static int amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev)
{
	struct amd8111e_priv *lp = dev->priv;
	int tx_index;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	/* Next free slot in the transmit descriptor ring. */
	tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;

	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);

	lp->tx_skbuff[tx_index] = skb;
	lp->tx_ring[tx_index].tx_flags = 0;

#if AMD8111E_VLAN_TAG_USED
	/* Ask the hardware to insert the VLAN tag carried by this skb. */
	if((lp->vlgrp != NULL) && vlan_tx_tag_present(skb)){
		lp->tx_ring[tx_index].tag_ctrl_cmd |=
			cpu_to_le32(TCC_VLAN_INSERT);
		lp->tx_ring[tx_index].tag_ctrl_info =
			cpu_to_le16(vlan_tx_tag_get(skb));

	}
#endif
	/* Map the payload for device DMA and publish its bus address. */
	lp->tx_dma_addr[tx_index] =
	    pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	lp->tx_ring[tx_index].buff_phy_addr =
	    (u32) cpu_to_le32(lp->tx_dma_addr[tx_index]);

	/* Set FCS and LTINT bits.
	 * OWN_BIT is set last so the device only sees a fully-built
	 * descriptor.  NOTE(review): there is no explicit write barrier
	 * before the OWN handoff — confirm ordering on target platforms. */
	lp->tx_ring[tx_index].tx_flags |=
	    cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);

	lp->tx_idx++;

	/* Trigger an immediate send poll. */
	writel( VAL1 | TDMD0, lp->mmio + CMD0);
	writel( VAL2 | RDMD0,lp->mmio + CMD0);

	dev->trans_start = jiffies;

	/* Stop the queue while the following descriptor is still in use. */
	if(amd8111e_tx_queue_avail(lp) < 0){
		netif_stop_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);
	return 0;
}
1290 /*
1291 This function returns all the memory mapped registers of the device.
1292 */
amd8111e_read_regs(struct amd8111e_priv * lp)1293 static char* amd8111e_read_regs(struct amd8111e_priv* lp)
1294 {
1295 void * mmio = lp->mmio;
1296 u32 * reg_buff;
1297
1298 reg_buff = kmalloc( AMD8111E_REG_DUMP_LEN,GFP_KERNEL);
1299 if(NULL == reg_buff)
1300 return NULL;
1301
1302 /* Read only necessary registers */
1303 reg_buff[0] = readl(mmio + XMT_RING_BASE_ADDR0);
1304 reg_buff[1] = readl(mmio + XMT_RING_LEN0);
1305 reg_buff[2] = readl(mmio + RCV_RING_BASE_ADDR0);
1306 reg_buff[3] = readl(mmio + RCV_RING_LEN0);
1307 reg_buff[4] = readl(mmio + CMD0);
1308 reg_buff[5] = readl(mmio + CMD2);
1309 reg_buff[6] = readl(mmio + CMD3);
1310 reg_buff[7] = readl(mmio + CMD7);
1311 reg_buff[8] = readl(mmio + INT0);
1312 reg_buff[9] = readl(mmio + INTEN0);
1313 reg_buff[10] = readl(mmio + LADRF);
1314 reg_buff[11] = readl(mmio + LADRF+4);
1315 reg_buff[12] = readl(mmio + STAT0);
1316
1317 return (char *)reg_buff;
1318 }
1319 /*
1320 amd8111e crc generator implementation is different from the kernel
1321 ether_crc() function.
1322 */
amd8111e_ether_crc(int len,char * mac_addr)1323 int amd8111e_ether_crc(int len, char* mac_addr)
1324 {
1325 int i,byte;
1326 unsigned char octet;
1327 u32 crc= INITCRC;
1328
1329 for(byte=0; byte < len; byte++){
1330 octet = mac_addr[byte];
1331 for( i=0;i < 8; i++){
1332 /*If the next bit form the input stream is 1,subtract the divisor (CRC32) from the dividend(crc).*/
1333 if( (octet & 0x1) ^ (crc & 0x1) ){
1334 crc >>= 1;
1335 crc ^= CRC32;
1336 }
1337 else
1338 crc >>= 1;
1339
1340 octet >>= 1;
1341 }
1342 }
1343 return crc;
1344 }
1345 /*
1346 This function sets promiscuos mode, all-multi mode or the multicast address
1347 list to the device.
1348 */
static void amd8111e_set_multicast_list(struct net_device *dev)
{
	struct dev_mc_list* mc_ptr;
	struct amd8111e_priv *lp = dev->priv;
	u32 mc_filter[2] ;
	int i,bit_num;
	if(dev->flags & IFF_PROMISC){
		printk(KERN_INFO "%s: Setting promiscuous mode.\n",dev->name);
		writel( VAL2 | PROM, lp->mmio + CMD2);
		return;
	}
	else
		/* Presumably writing PROM without VAL2 clears the
		 * promiscuous bit — confirm against the CMD2 register spec. */
		writel( PROM, lp->mmio + CMD2);
	if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
		/* get all multicast packet */
		mc_filter[1] = mc_filter[0] = 0xffffffff;
		lp->mc_list = dev->mc_list;
		lp->options |= OPTION_MULTICAST_ENABLE;
		/* NOTE(review): type-punning mc_filter through (u64 *)
		 * assumes the in-memory layout matches the LADRF register
		 * pair — confirm for big-endian targets. */
		amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
		return;
	}
	if( dev->mc_count == 0 ){
		/* get only own packets */
		mc_filter[1] = mc_filter[0] = 0;
		lp->mc_list = 0;
		lp->options &= ~OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
		/* disable promiscous mode */
		writel(PROM, lp->mmio + CMD2);
		return;
	}
	/* load all the multicast addresses in the logic filter */
	lp->options |= OPTION_MULTICAST_ENABLE;
	lp->mc_list = dev->mc_list;
	mc_filter[1] = mc_filter[0] = 0;
	/* Hash each address into one of 64 filter bits. */
	for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
		     i++, mc_ptr = mc_ptr->next) {
		bit_num = ( amd8111e_ether_crc(ETH_ALEN,mc_ptr->dmi_addr)                                                         >> 26 ) & 0x3f;
		mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
	}
	amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD2);

}
1395
1396 /*
1397 This function handles all the ethtool ioctls. It gives driver info, gets/sets driver speed, gets memory mapped register values, forces auto negotiation, sets/gets WOL options for ethtool application.
1398 */
1399
/*
 * Dispatch SIOCETHTOOL sub-commands: driver info, get/set link settings,
 * register dump, autoneg restart, link status, and get/set wake-on-LAN.
 * Returns 0 or a negative errno.
 */
static int amd8111e_ethtool_ioctl(struct net_device* dev, void* useraddr)
{
	struct amd8111e_priv *lp = dev->priv;
	struct pci_dev *pci_dev = lp->pci_dev;
	u32 ethcmd;

	if( useraddr == NULL)
		return -EINVAL;
	/* The first word of the user buffer selects the sub-command. */
	if(copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
		return -EFAULT;

	switch(ethcmd){

	/* driver and bus identification */
	case ETHTOOL_GDRVINFO:{
		struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
		strcpy (info.driver, MODULE_NAME);
		strcpy (info.version, MODULE_VERSION);
		memset(&info.fw_version, 0, sizeof(info.fw_version));
		sprintf(info.fw_version,"%u",chip_version);
		strcpy (info.bus_info, pci_dev->slot_name);
		info.eedump_len = 0;
		info.regdump_len = AMD8111E_REG_DUMP_LEN;
		if (copy_to_user (useraddr, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	/* get settings */
	case ETHTOOL_GSET: {
		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
		spin_lock_irq(&lp->lock);
		mii_ethtool_gset(&lp->mii_if, &ecmd);
		spin_unlock_irq(&lp->lock);
		if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
			return -EFAULT;
		return 0;
	}
	/* set settings */
	case ETHTOOL_SSET: {
		int r;
		struct ethtool_cmd ecmd;
		if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
			return -EFAULT;

		spin_lock_irq(&lp->lock);
		r = mii_ethtool_sset(&lp->mii_if, &ecmd);
		spin_unlock_irq(&lp->lock);
		return r;
	}
	/* register dump */
	case ETHTOOL_GREGS: {
		struct ethtool_regs regs;
		u8 *regbuf;
		int ret;

		if (copy_from_user(&regs, useraddr, sizeof(regs)))
			return -EFAULT;
		/* Clamp the requested length to what we can supply. */
		if (regs.len > AMD8111E_REG_DUMP_LEN)
			regs.len = AMD8111E_REG_DUMP_LEN;
		regs.version = 0;
		if (copy_to_user(useraddr, &regs, sizeof(regs)))
			return -EFAULT;

		regbuf = amd8111e_read_regs(lp);
		if (!regbuf)
			return -ENOMEM;

		/* The register data follows the header in the user buffer. */
		useraddr += offsetof(struct ethtool_regs, data);
		ret = 0;
		if (copy_to_user(useraddr, regbuf, regs.len))
			ret = -EFAULT;
		kfree(regbuf);
		return ret;
	}
	/* restart autonegotiation */
	case ETHTOOL_NWAY_RST: {
		return mii_nway_restart(&lp->mii_if);
	}
	/* get link status */
	case ETHTOOL_GLINK: {
		struct ethtool_value val = {ETHTOOL_GLINK};
		val.data = mii_link_ok(&lp->mii_if);
		if (copy_to_user(useraddr, &val, sizeof(val)))
			return -EFAULT;
		return 0;
	}
	/* report wake-on-LAN capabilities and current setting */
	case ETHTOOL_GWOL: {
		struct ethtool_wolinfo wol_info = { ETHTOOL_GWOL };

		wol_info.supported = WAKE_MAGIC|WAKE_PHY;
		wol_info.wolopts = 0;
		if (lp->options & OPTION_WOL_ENABLE)
			wol_info.wolopts = WAKE_MAGIC;
		memset(&wol_info.sopass, 0, sizeof(wol_info.sopass));
		if (copy_to_user(useraddr, &wol_info, sizeof(wol_info)))
			return -EFAULT;
		return 0;
	}
	/* set wake-on-LAN options */
	case ETHTOOL_SWOL: {
		struct ethtool_wolinfo wol_info;

		if (copy_from_user(&wol_info, useraddr, sizeof(wol_info)))
			return -EFAULT;
		if (wol_info.wolopts & ~(WAKE_MAGIC |WAKE_PHY))
			return -EINVAL;
		spin_lock_irq(&lp->lock);
		if(wol_info.wolopts & WAKE_MAGIC)
			lp->options |=
				(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
		else if(wol_info.wolopts & WAKE_PHY)
			lp->options |=
				(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
		else
			lp->options &= ~OPTION_WOL_ENABLE;
		spin_unlock_irq(&lp->lock);
		return 0;
	}

	default:
		break;
	}
	return -EOPNOTSUPP;
}
/* Top-level ioctl dispatcher: ethtool plus MII register get/set. */
static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
	struct amd8111e_priv *lp = dev->priv;
	int err;
	u32 mii_regval;

	/* All supported commands are privileged operations. */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch(cmd) {
	case SIOCETHTOOL:
		return amd8111e_ethtool_ioctl(dev, (void *) ifr->ifr_data);
	case SIOCGMIIPHY:
		data->phy_id = PHY_ID;

		/* fallthru */
	case SIOCGMIIREG:

		spin_lock_irq(&lp->lock);
		err = amd8111e_read_phy(lp, data->phy_id,
			data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
		spin_unlock_irq(&lp->lock);

		data->val_out = mii_regval;
		return err;

	case SIOCSMIIREG:

		spin_lock_irq(&lp->lock);
		err = amd8111e_write_phy(lp, data->phy_id,
			data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
		spin_unlock_irq(&lp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
amd8111e_set_mac_address(struct net_device * dev,void * p)1563 static int amd8111e_set_mac_address(struct net_device *dev, void *p)
1564 {
1565 struct amd8111e_priv *lp = dev->priv;
1566 int i;
1567 struct sockaddr *addr = p;
1568
1569 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1570 spin_lock_irq(&lp->lock);
1571 /* Setting the MAC address to the device */
1572 for(i = 0; i < ETH_ADDR_LEN; i++)
1573 writeb( dev->dev_addr[i], lp->mmio + PADR + i );
1574
1575 spin_unlock_irq(&lp->lock);
1576
1577 return 0;
1578 }
1579
1580 /*
1581 This function changes the mtu of the device. It restarts the device to initialize the descriptor with new receive buffers.
1582 */
amd8111e_change_mtu(struct net_device * dev,int new_mtu)1583 int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
1584 {
1585 struct amd8111e_priv *lp = dev->priv;
1586 int err;
1587
1588 if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
1589 return -EINVAL;
1590
1591 if (!netif_running(dev)) {
1592 /* new_mtu will be used
1593 when device starts netxt time */
1594 dev->mtu = new_mtu;
1595 return 0;
1596 }
1597
1598 spin_lock_irq(&lp->lock);
1599
1600 /* stop the chip */
1601 writel(RUN, lp->mmio + CMD0);
1602
1603 dev->mtu = new_mtu;
1604
1605 err = amd8111e_restart(dev);
1606 spin_unlock_irq(&lp->lock);
1607 if(!err)
1608 netif_start_queue(dev);
1609 return err;
1610 }
1611
1612 #if AMD8111E_VLAN_TAG_USED
amd8111e_vlan_rx_register(struct net_device * dev,struct vlan_group * grp)1613 static void amd8111e_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1614 {
1615 struct amd8111e_priv *lp = dev->priv;
1616 spin_lock_irq(&lp->lock);
1617 lp->vlgrp = grp;
1618 spin_unlock_irq(&lp->lock);
1619 }
1620
amd8111e_vlan_rx_kill_vid(struct net_device * dev,unsigned short vid)1621 static void amd8111e_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1622 {
1623 struct amd8111e_priv *lp = dev->priv;
1624 spin_lock_irq(&lp->lock);
1625 if (lp->vlgrp)
1626 lp->vlgrp->vlan_devices[vid] = NULL;
1627 spin_unlock_irq(&lp->lock);
1628 }
1629 #endif
amd8111e_enable_magicpkt(struct amd8111e_priv * lp)1630 static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
1631 {
1632 writel( VAL1|MPPLBA, lp->mmio + CMD3);
1633 writel( VAL0|MPEN_SW, lp->mmio + CMD7);
1634
1635 /* To eliminate PCI posting bug */
1636 readl(lp->mmio + CMD7);
1637 return 0;
1638 }
1639
amd8111e_enable_link_change(struct amd8111e_priv * lp)1640 static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
1641 {
1642
1643 /* Adapter is already stoped/suspended/interrupt-disabled */
1644 writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
1645
1646 /* To eliminate PCI posting bug */
1647 readl(lp->mmio + CMD7);
1648 return 0;
1649 }
/* This function is called when a packet transmission fails to complete
 * within a reasonable period, on the assumption that interrupts have
 * failed or the interface is locked up. This function will reinitialize
 * the hardware. */

amd8111e_tx_timeout(struct net_device * dev)1652 static void amd8111e_tx_timeout(struct net_device *dev)
1653 {
1654 struct amd8111e_priv* lp = dev->priv;
1655 int err;
1656
1657 printk(KERN_ERR "%s: transmit timed out, resetting\n",
1658 dev->name);
1659 spin_lock_irq(&lp->lock);
1660 err = amd8111e_restart(dev);
1661 spin_unlock_irq(&lp->lock);
1662 if(!err)
1663 netif_wake_queue(dev);
1664 }
/* PM suspend: quiesce the device, optionally arm wake-up, enter D3. */
static int amd8111e_suspend(struct pci_dev *pci_dev, u32 state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct amd8111e_priv *lp = dev->priv;

	if (!netif_running(dev))
		return 0;

	/* disable the interrupt */
	spin_lock_irq(&lp->lock);
	amd8111e_disable_interrupt(lp);
	spin_unlock_irq(&lp->lock);

	netif_device_detach(dev);

	/* stop chip */
	spin_lock_irq(&lp->lock);
	/* Stop the IPG timer first so it cannot touch the halted chip. */
	if(lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);
	amd8111e_stop_chip(lp);
	spin_unlock_irq(&lp->lock);

	if(lp->options & OPTION_WOL_ENABLE){
		/* enable wol */
		if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
			amd8111e_enable_magicpkt(lp);
		if(lp->options & OPTION_WAKE_PHY_ENABLE)
			amd8111e_enable_link_change(lp);

		/* Allow wake events from D3hot (3) and D3cold (4). */
		pci_enable_wake(pci_dev, 3, 1);
		pci_enable_wake(pci_dev, 4, 1); /* D3 cold */

	}
	else{
		pci_enable_wake(pci_dev, 3, 0);
		pci_enable_wake(pci_dev, 4, 0); /* 4 == D3 cold */
	}

	/* Save config space, then drop the device into D3. */
	pci_save_state(pci_dev, lp->pm_state);
	pci_set_power_state(pci_dev, 3);

	return 0;
}
/* PM resume: restore power/config state and restart the interface. */
static int amd8111e_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct amd8111e_priv *lp = dev->priv;

	if (!netif_running(dev))
		return 0;

	/* Back to D0, then restore the config space saved at suspend. */
	pci_set_power_state(pci_dev, 0);
	pci_restore_state(pci_dev, lp->pm_state);

	/* Disarm any wake-up sources armed during suspend. */
	pci_enable_wake(pci_dev, 3, 0);
	pci_enable_wake(pci_dev, 4, 0); /* D3 cold */

	netif_device_attach(dev);

	spin_lock_irq(&lp->lock);
	amd8111e_restart(dev);
	/* Restart ipg timer */
	if(lp->options & OPTION_DYN_IPG_ENABLE)
		mod_timer(&lp->ipg_data.ipg_timer,
				jiffies + (IPG_CONVERGE_TIME * HZ));
	spin_unlock_irq(&lp->lock);

	return 0;
}
1734
1735
/* Undo everything amd8111e_probe_one() did, in reverse order. */
static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	if (dev) {
		unregister_netdev(dev);
		iounmap((void *) ((struct amd8111e_priv *)(dev->priv))->mmio);
		/* NOTE(review): dev came from alloc_etherdev(); on newer
		 * kernel APIs free_netdev() is required instead of kfree()
		 * — confirm against the target kernel version. */
		kfree(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
/*
 * Periodic timer callback implementing the dynamic IPG (inter-packet gap)
 * tuning state machine.  In CSTATE the IPG is swept upward in IPG_STEP
 * increments while the per-setting collision delta is sampled; the setting
 * with the fewest collisions is remembered.  In SSTATE the chosen IPG is
 * held for IPG_STABLE_TIME ticks before a new sweep begins.
 */
static void amd8111e_config_ipg(struct net_device* dev)
{
	struct amd8111e_priv *lp = dev->priv;
	struct ipg_info* ipg_data = &lp->ipg_data;
	void * mmio = lp->mmio;
	unsigned int prev_col_cnt = ipg_data->col_cnt;
	unsigned int total_col_cnt;
	unsigned int tmp_ipg;

	/* No collisions on full duplex: just use the default gap.
	 * NOTE(review): this early return also skips the mod_timer() at
	 * the bottom, so tuning never resumes if the link later drops to
	 * half duplex — confirm this is intended. */
	if(lp->link_config.duplex == DUPLEX_FULL){
		ipg_data->ipg = DEFAULT_IPG;
		return;
	}

	if(ipg_data->ipg_state == SSTATE){

		if(ipg_data->timer_tick == IPG_STABLE_TIME){

			/* Stability period over: restart the sweep. */
			ipg_data->timer_tick = 0;
			ipg_data->ipg = MIN_IPG - IPG_STEP;
			ipg_data->current_ipg = MIN_IPG;
			ipg_data->diff_col_cnt = 0xFFFFFFFF;
			ipg_data->ipg_state = CSTATE;
		}
		else
			ipg_data->timer_tick++;
	}

	if(ipg_data->ipg_state == CSTATE){

		/* Get the current collision count */

		total_col_cnt = ipg_data->col_cnt =
			amd8111e_read_mib(mmio, xmt_collisions);

		/* Keep the IPG value showing the smallest collision delta. */
		if ((total_col_cnt - prev_col_cnt) <
				(ipg_data->diff_col_cnt)){

			ipg_data->diff_col_cnt =
				total_col_cnt - prev_col_cnt ;

			ipg_data->ipg = ipg_data->current_ipg;
		}

		ipg_data->current_ipg += IPG_STEP;

		if (ipg_data->current_ipg <= MAX_IPG)
			tmp_ipg = ipg_data->current_ipg;
		else{
			/* Sweep finished: load the best IPG and settle. */
			tmp_ipg = ipg_data->ipg;
			ipg_data->ipg_state = SSTATE;
		}
		writew((u32)tmp_ipg, mmio + IPG);
		writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
	}
	/* Re-arm for the next sampling interval. */
	mod_timer(&lp->ipg_data.ipg_timer, jiffies + (IPG_CONVERGE_TIME * HZ));
	return;

}
1807
amd8111e_probe_one(struct pci_dev * pdev,const struct pci_device_id * ent)1808 static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
1809 const struct pci_device_id *ent)
1810 {
1811 int err,i,pm_cap;
1812 unsigned long reg_addr,reg_len;
1813 struct amd8111e_priv* lp;
1814 struct net_device* dev;
1815
1816 err = pci_enable_device(pdev);
1817 if(err){
1818 printk(KERN_ERR "amd8111e: Cannot enable new PCI device,"
1819 "exiting.\n");
1820 return err;
1821 }
1822
1823 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
1824 printk(KERN_ERR "amd8111e: Cannot find PCI base address"
1825 "exiting.\n");
1826 err = -ENODEV;
1827 goto err_disable_pdev;
1828 }
1829
1830 err = pci_request_regions(pdev, MODULE_NAME);
1831 if(err){
1832 printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
1833 "exiting.\n");
1834 goto err_disable_pdev;
1835 }
1836
1837 pci_set_master(pdev);
1838
1839 /* Find power-management capability. */
1840 if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
1841 printk(KERN_ERR "amd8111e: No Power Management capability, "
1842 "exiting.\n");
1843 goto err_free_reg;
1844 }
1845
1846 /* Initialize DMA */
1847 if(!pci_dma_supported(pdev, 0xffffffff)){
1848 printk(KERN_ERR "amd8111e: DMA not supported,"
1849 "exiting.\n");
1850 goto err_free_reg;
1851 } else
1852 pdev->dma_mask = 0xffffffff;
1853
1854 reg_addr = pci_resource_start(pdev, 0);
1855 reg_len = pci_resource_len(pdev, 0);
1856
1857 dev = alloc_etherdev(sizeof(struct amd8111e_priv));
1858 if (!dev) {
1859 printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
1860 err = -ENOMEM;
1861 goto err_free_reg;
1862 }
1863
1864 SET_MODULE_OWNER(dev);
1865
1866 #if AMD8111E_VLAN_TAG_USED
1867 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
1868 dev->vlan_rx_register =amd8111e_vlan_rx_register;
1869 dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
1870 #endif
1871
1872 lp = dev->priv;
1873 lp->pci_dev = pdev;
1874 lp->amd8111e_net_dev = dev;
1875 lp->pm_cap = pm_cap;
1876
1877 /* setting mii default values */
1878 lp->mii_if.dev = dev;
1879 lp->mii_if.mdio_read = amd8111e_mdio_read;
1880 lp->mii_if.mdio_write = amd8111e_mdio_write;
1881 lp->mii_if.phy_id = PHY_ID;
1882
1883 spin_lock_init(&lp->lock);
1884
1885 lp->mmio = ioremap(reg_addr, reg_len);
1886 if (lp->mmio == 0) {
1887 printk(KERN_ERR "amd8111e: Cannot map device registers, "
1888 "exiting\n");
1889 err = -ENOMEM;
1890 goto err_free_dev;
1891 }
1892
1893 /* Initializing MAC address */
1894 for(i = 0; i < ETH_ADDR_LEN; i++)
1895 dev->dev_addr[i] =readb(lp->mmio + PADR + i);
1896
1897 /* Setting user defined parametrs */
1898 lp->ext_phy_option = speed_duplex[card_idx];
1899 if(coalesce[card_idx])
1900 lp->options |= OPTION_INTR_COAL_ENABLE;
1901 if(dynamic_ipg[card_idx++])
1902 lp->options |= OPTION_DYN_IPG_ENABLE;
1903
1904 /* Initialize driver entry points */
1905 dev->open = amd8111e_open;
1906 dev->hard_start_xmit = amd8111e_start_xmit;
1907 dev->stop = amd8111e_close;
1908 dev->get_stats = amd8111e_get_stats;
1909 dev->set_multicast_list = amd8111e_set_multicast_list;
1910 dev->set_mac_address = amd8111e_set_mac_address;
1911 dev->do_ioctl = amd8111e_ioctl;
1912 dev->change_mtu = amd8111e_change_mtu;
1913 dev->irq =pdev->irq;
1914 dev->tx_timeout = amd8111e_tx_timeout;
1915 dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
1916
1917 #if AMD8111E_VLAN_TAG_USED
1918 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1919 dev->vlan_rx_register =amd8111e_vlan_rx_register;
1920 dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
1921 #endif
1922
1923 /* Set receive buffer length and set jumbo option*/
1924 amd8111e_set_rx_buff_len(dev);
1925
1926 err = register_netdev(dev);
1927 if (err) {
1928 printk(KERN_ERR "amd8111e: Cannot register net device, "
1929 "exiting.\n");
1930 goto err_iounmap;
1931 }
1932
1933 pci_set_drvdata(pdev, dev);
1934
1935 /* Initialize software ipg timer */
1936 if(lp->options & OPTION_DYN_IPG_ENABLE){
1937 init_timer(&lp->ipg_data.ipg_timer);
1938 lp->ipg_data.ipg_timer.data = (unsigned long) dev;
1939 lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
1940 lp->ipg_data.ipg_timer.expires = jiffies +
1941 IPG_CONVERGE_TIME * HZ;
1942 lp->ipg_data.ipg = DEFAULT_IPG;
1943 lp->ipg_data.ipg_state = CSTATE;
1944 };
1945
1946 /* display driver and device information */
1947
1948 chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
1949 printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n", dev->name,MODULE_VERSION);
1950 printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet ", dev->name, chip_version);
1951 for (i = 0; i < 6; i++)
1952 printk("%2.2x%c",dev->dev_addr[i],i == 5 ? ' ' : ':');
1953 printk( "\n");
1954 return 0;
1955 err_iounmap:
1956 iounmap((void *) lp->mmio);
1957
1958 err_free_dev:
1959 kfree(dev);
1960
1961 err_free_reg:
1962 pci_release_regions(pdev);
1963
1964 err_disable_pdev:
1965 pci_disable_device(pdev);
1966 pci_set_drvdata(pdev, NULL);
1967 return err;
1968
1969 }
1970
1971 static struct pci_driver amd8111e_driver = {
1972 name: MODULE_NAME,
1973 id_table: amd8111e_pci_tbl,
1974 probe: amd8111e_probe_one,
1975 remove: __devexit_p(amd8111e_remove_one),
1976 suspend: amd8111e_suspend,
1977 resume: amd8111e_resume
1978 };
1979
amd8111e_init(void)1980 static int __init amd8111e_init(void)
1981 {
1982 return pci_module_init(&amd8111e_driver);
1983 }
1984
amd8111e_cleanup(void)1985 static void __exit amd8111e_cleanup(void)
1986 {
1987 pci_unregister_driver(&amd8111e_driver);
1988 }
1989
/* Module load/unload hooks. */
module_init(amd8111e_init);
module_exit(amd8111e_cleanup);
1992