1 /* xircom_tulip_cb.c: A Xircom CBE-100 ethernet driver for Linux. */
2 /*
3 Written/copyright 1994-1999 by Donald Becker.
4
5 This software may be used and distributed according to the terms
6 of the GNU General Public License, incorporated herein by reference.
7
8 The author may be reached as becker@scyld.com, or C/O
9 Scyld Computing Corporation
10 410 Severn Ave., Suite 210
11 Annapolis MD 21403
12
13 -----------------------------------------------------------
14
15 Linux kernel-specific changes:
16
17 LK1.0 (Ion Badulescu)
18 - Major cleanup
19 - Use 2.4 PCI API
20 - Support ethtool
21 - Rewrite perfect filter/hash code
22 - Use interrupts for media changes
23
24 LK1.1 (Ion Badulescu)
25 - Disallow negotiation of unsupported full-duplex modes
26 */
27
28 #define DRV_NAME "xircom_tulip_cb"
29 #define DRV_VERSION "0.91+LK1.1"
30 #define DRV_RELDATE "October 11, 2001"
31
32 #define CARDBUS 1
33
34 /* A few user-configurable values. */
35
36 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
37 static int max_interrupt_work = 25;
38
39 #define MAX_UNITS 4
40 /* Used to pass the full-duplex flag, etc. */
41 static int full_duplex[MAX_UNITS];
42 static int options[MAX_UNITS];
43 static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
44
45 /* Keep the ring sizes a power of two for efficiency.
46 Making the Tx ring too large decreases the effectiveness of channel
47 bonding and packet priority.
48 There are no ill effects from too-large receive rings. */
49 #define TX_RING_SIZE 16
50 #define RX_RING_SIZE 32
51
52 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
53 #ifdef __alpha__
54 static int rx_copybreak = 1518;
55 #else
56 static int rx_copybreak = 100;
57 #endif
58
59 /*
60 Set the bus performance register.
61 Typical: Set 16 longword cache alignment, no burst limit.
62 Cache alignment bits 15:14 Burst length 13:8
63 0000 No alignment 0x00000000 unlimited 0800 8 longwords
64 4000 8 longwords 0100 1 longword 1000 16 longwords
65 8000 16 longwords 0200 2 longwords 2000 32 longwords
66 C000 32 longwords 0400 4 longwords
67 Warning: many older 486 systems are broken and require setting 0x00A04800
68 8 longword cache alignment, 8 longword burst.
69 ToDo: Non-Intel setting could be better.
70 */
71
72 #if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
73 static int csr0 = 0x01A00000 | 0xE000;
74 #elif defined(__powerpc__)
75 static int csr0 = 0x01B00000 | 0x8000;
76 #elif defined(__sparc__)
77 static int csr0 = 0x01B00080 | 0x8000;
78 #elif defined(__i386__)
79 static int csr0 = 0x01A00000 | 0x8000;
80 #else
81 #warning Processor architecture undefined!
82 static int csr0 = 0x00A00000 | 0x4800;
83 #endif
84
85 /* Operational parameters that usually are not changed. */
86 /* Time in jiffies before concluding the transmitter is hung. */
87 #define TX_TIMEOUT (4 * HZ)
88 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
89 #define PKT_SETUP_SZ 192 /* Size of the setup frame */
90
91 /* PCI registers */
92 #define PCI_POWERMGMT 0x40
93
94 #include <linux/config.h>
95 #include <linux/module.h>
96 #include <linux/kernel.h>
97 #include <linux/pci.h>
98 #include <linux/netdevice.h>
99 #include <linux/etherdevice.h>
100 #include <linux/delay.h>
101 #include <linux/init.h>
102 #include <linux/mii.h>
103 #include <linux/ethtool.h>
104 #include <linux/crc32.h>
105
106 #include <asm/io.h>
107 #include <asm/processor.h> /* Processor type for cache alignment. */
108 #include <asm/uaccess.h>
109
110
111 /* These identify the driver base version and may not be removed. */
112 static char version[] __devinitdata =
113 KERN_INFO DRV_NAME ".c derived from tulip.c:v0.91 4/14/99 becker@scyld.com\n"
114 KERN_INFO " unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE "\n";
115
116 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
117 MODULE_DESCRIPTION("Xircom CBE-100 ethernet driver");
118 MODULE_LICENSE("GPL v2");
119
120 MODULE_PARM(debug, "i");
121 MODULE_PARM(max_interrupt_work, "i");
122 MODULE_PARM(rx_copybreak, "i");
123 MODULE_PARM(csr0, "i");
124 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
125 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
126
127 #define RUN_AT(x) (jiffies + (x))
128
129 #define xircom_debug debug
130 #ifdef XIRCOM_DEBUG
131 static int xircom_debug = XIRCOM_DEBUG;
132 #else
133 static int xircom_debug = 1;
134 #endif
135
136 /*
137 Theory of Operation
138
139 I. Board Compatibility
140
141 This device driver was forked from the driver for the DECchip "Tulip",
142 Digital's single-chip ethernet controllers for PCI. It supports Xircom's
143 almost-Tulip-compatible CBE-100 CardBus adapters.
144
145 II. Board-specific settings
146
147 PCI bus devices are configured by the system at boot time, so no jumpers
148 need to be set on the board. The system BIOS preferably should assign the
149 PCI INTA signal to an otherwise unused system IRQ line.
150
151 III. Driver operation
152
153 IIIa. Ring buffers
154
155 The Xircom can use either ring buffers or lists of Tx and Rx descriptors.
156 This driver uses statically allocated rings of Rx and Tx descriptors, set at
157 compile time by RX/TX_RING_SIZE. This version of the driver allocates skbuffs
158 for the Rx ring buffers at open() time and passes the skb->data field to the
159 Xircom as receive data buffers. When an incoming frame is less than
160 RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is
161 copied to the new skbuff. When the incoming frame is larger, the skbuff is
162 passed directly up the protocol stack and replaced by a newly allocated
163 skbuff.
164
165 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
166 using a full-sized skbuff for small frames vs. the copying costs of larger
167 frames. For small frames the copying cost is negligible (esp. considering
168 that we are pre-loading the cache with immediately useful header
169 information). For large frames the copying cost is non-trivial, and the
170 larger copy might flush the cache of useful data. A subtle aspect of this
171 choice is that the Xircom only receives into longword aligned buffers, thus
172 the IP header at offset 14 isn't longword aligned for further processing.
173 Copied frames are put into the new skbuff at an offset of "+2", thus copying
174 has the beneficial effect of aligning the IP header and preloading the
175 cache.
176
177 IIIC. Synchronization
178 The driver runs as two independent, single-threaded flows of control. One
179 is the send-packet routine, which enforces single-threaded use by the
180 dev->tbusy flag. The other thread is the interrupt handler, which is single
181 threaded by the hardware and other software.
182
183 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
184 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
185 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
186 the 'tp->tx_full' flag.
187
188 The interrupt handler has exclusive control over the Rx ring and records stats
189 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
190 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
191 stats.) After reaping the stats, it marks the queue entry as empty by setting
192 the 'base' to zero. Iff the 'tp->tx_full' flag is set, it clears both the
193 tx_full and tbusy flags.
194
195 IV. Notes
196
197 IVb. References
198
199 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
200 http://www.digital.com (search for current 21*4* datasheets and "21X4 SROM")
201 http://www.national.com/pf/DP/DP83840A.html
202
203 IVc. Errata
204
205 */
206
207 /* A full-duplex map for media types. */
208 enum MediaIs {
209 MediaIsFD = 1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8,
210 MediaIs100=16};
211 static const char media_cap[] =
212 {0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20 };
213
214 /* Offsets to the Command and Status Registers, "CSRs". All accesses
215 must be longword instructions and quadword aligned. */
216 enum xircom_offsets {
217 CSR0=0, CSR1=0x08, CSR2=0x10, CSR3=0x18, CSR4=0x20, CSR5=0x28,
218 CSR6=0x30, CSR7=0x38, CSR8=0x40, CSR9=0x48, CSR10=0x50, CSR11=0x58,
219 CSR12=0x60, CSR13=0x68, CSR14=0x70, CSR15=0x78, CSR16=0x04, };
220
221 /* The bits in the CSR5 status registers, mostly interrupt sources. */
222 enum status_bits {
223 LinkChange=0x08000000,
224 NormalIntr=0x10000, NormalIntrMask=0x00014045,
225 AbnormalIntr=0x8000, AbnormalIntrMask=0x0a00a5a2,
226 ReservedIntrMask=0xe0001a18,
227 EarlyRxIntr=0x4000, BusErrorIntr=0x2000,
228 EarlyTxIntr=0x400, RxDied=0x100, RxNoBuf=0x80, RxIntr=0x40,
229 TxFIFOUnderflow=0x20, TxNoBuf=0x04, TxDied=0x02, TxIntr=0x01,
230 };
231
232 enum csr0_control_bits {
233 EnableMWI=0x01000000, EnableMRL=0x00800000,
234 EnableMRM=0x00200000, EqualBusPrio=0x02,
235 SoftwareReset=0x01,
236 };
237
238 enum csr6_control_bits {
239 ReceiveAllBit=0x40000000, AllMultiBit=0x80, PromiscBit=0x40,
240 HashFilterBit=0x01, FullDuplexBit=0x0200,
241 TxThresh10=0x400000, TxStoreForw=0x200000,
242 TxThreshMask=0xc000, TxThreshShift=14,
243 EnableTx=0x2000, EnableRx=0x02,
244 ReservedZeroMask=0x8d930134, ReservedOneMask=0x320c0000,
245 EnableTxRx=(EnableTx | EnableRx),
246 };
247
248
249 enum tbl_flag {
250 HAS_MII=1, HAS_ACPI=2,
251 };
252 static struct xircom_chip_table {
253 char *chip_name;
254 int valid_intrs; /* CSR7 interrupt enable settings */
255 int flags;
256 } xircom_tbl[] = {
257 { "Xircom Cardbus Adapter",
258 LinkChange | NormalIntr | AbnormalIntr | BusErrorIntr |
259 RxDied | RxNoBuf | RxIntr | TxFIFOUnderflow | TxNoBuf | TxDied | TxIntr,
260 HAS_MII | HAS_ACPI, },
261 { NULL, },
262 };
263 /* This matches the table above. */
264 enum chips {
265 X3201_3,
266 };
267
268
269 /* The Xircom Rx and Tx buffer descriptors. */
270 struct xircom_rx_desc {
271 s32 status;
272 s32 length;
273 u32 buffer1, buffer2;
274 };
275
276 struct xircom_tx_desc {
277 s32 status;
278 s32 length;
279 u32 buffer1, buffer2; /* We use only buffer 1. */
280 };
281
282 enum tx_desc0_status_bits {
283 Tx0DescOwned=0x80000000, Tx0DescError=0x8000, Tx0NoCarrier=0x0800,
284 Tx0LateColl=0x0200, Tx0ManyColl=0x0100, Tx0Underflow=0x02,
285 };
286 enum tx_desc1_status_bits {
287 Tx1ComplIntr=0x80000000, Tx1LastSeg=0x40000000, Tx1FirstSeg=0x20000000,
288 Tx1SetupPkt=0x08000000, Tx1DisableCRC=0x04000000, Tx1RingWrap=0x02000000,
289 Tx1ChainDesc=0x01000000, Tx1NoPad=0x800000, Tx1HashSetup=0x400000,
290 Tx1WholePkt=(Tx1FirstSeg | Tx1LastSeg),
291 };
292 enum rx_desc0_status_bits {
293 Rx0DescOwned=0x80000000, Rx0DescError=0x8000, Rx0NoSpace=0x4000,
294 Rx0Runt=0x0800, Rx0McastPkt=0x0400, Rx0FirstSeg=0x0200, Rx0LastSeg=0x0100,
295 Rx0HugeFrame=0x80, Rx0CRCError=0x02,
296 Rx0WholePkt=(Rx0FirstSeg | Rx0LastSeg),
297 };
298 enum rx_desc1_status_bits {
299 Rx1RingWrap=0x02000000, Rx1ChainDesc=0x01000000,
300 };
301
302 struct xircom_private {
303 struct xircom_rx_desc rx_ring[RX_RING_SIZE];
304 struct xircom_tx_desc tx_ring[TX_RING_SIZE];
305 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
306 struct sk_buff* tx_skbuff[TX_RING_SIZE];
307 #ifdef CARDBUS
308 /* The X3201-3 requires 4-byte aligned tx bufs */
309 struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE];
310 #endif
311 /* The addresses of receive-in-place skbuffs. */
312 struct sk_buff* rx_skbuff[RX_RING_SIZE];
313 u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)]; /* Pseudo-Tx frame to init address table. */
314 int chip_id;
315 struct net_device_stats stats;
316 unsigned int cur_rx, cur_tx; /* The next free ring entry */
317 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
318 unsigned int tx_full:1; /* The Tx queue is full. */
319 unsigned int speed100:1;
320 unsigned int full_duplex:1; /* Full-duplex operation requested. */
321 unsigned int autoneg:1;
322 unsigned int default_port:4; /* Last dev->if_port value. */
323 unsigned int open:1;
324 unsigned int csr0; /* CSR0 setting. */
325 unsigned int csr6; /* Current CSR6 control settings. */
326 u16 to_advertise; /* NWay capabilities advertised. */
327 u16 advertising[4];
328 signed char phys[4], mii_cnt; /* MII device addresses. */
329 int saved_if_port;
330 struct pci_dev *pdev;
331 spinlock_t lock;
332 };
333
334 static int mdio_read(struct net_device *dev, int phy_id, int location);
335 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
336 static void xircom_up(struct net_device *dev);
337 static void xircom_down(struct net_device *dev);
338 static int xircom_open(struct net_device *dev);
339 static void xircom_tx_timeout(struct net_device *dev);
340 static void xircom_init_ring(struct net_device *dev);
341 static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev);
342 static int xircom_rx(struct net_device *dev);
343 static void xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
344 static int xircom_close(struct net_device *dev);
345 static struct net_device_stats *xircom_get_stats(struct net_device *dev);
346 static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
347 static void set_rx_mode(struct net_device *dev);
348 static void check_duplex(struct net_device *dev);
349
350
351 /* The Xircom cards are picky about when certain bits in CSR6 can be
352 manipulated. Keith Owens <kaos@ocs.com.au>. */
outl_CSR6(u32 newcsr6,long ioaddr)353 static void outl_CSR6(u32 newcsr6, long ioaddr)
354 {
355 const int strict_bits =
356 TxThresh10 | TxStoreForw | TxThreshMask | EnableTxRx | FullDuplexBit;
357 int csr5, csr5_22_20, csr5_19_17, currcsr6, attempts = 200;
358 unsigned long flags;
359 save_flags(flags);
360 cli();
361 /* mask out the reserved bits that always read 0 on the Xircom cards */
362 newcsr6 &= ~ReservedZeroMask;
363 /* or in the reserved bits that always read 1 */
364 newcsr6 |= ReservedOneMask;
365 currcsr6 = inl(ioaddr + CSR6);
366 if (((newcsr6 & strict_bits) == (currcsr6 & strict_bits)) ||
367 ((currcsr6 & ~EnableTxRx) == 0)) {
368 outl(newcsr6, ioaddr + CSR6); /* safe */
369 restore_flags(flags);
370 return;
371 }
372 /* make sure the transmitter and receiver are stopped first */
373 currcsr6 &= ~EnableTxRx;
374 while (1) {
375 csr5 = inl(ioaddr + CSR5);
376 if (csr5 == 0xffffffff)
377 break; /* cannot read csr5, card removed? */
378 csr5_22_20 = csr5 & 0x700000;
379 csr5_19_17 = csr5 & 0x0e0000;
380 if ((csr5_22_20 == 0 || csr5_22_20 == 0x600000) &&
381 (csr5_19_17 == 0 || csr5_19_17 == 0x80000 || csr5_19_17 == 0xc0000))
382 break; /* both are stopped or suspended */
383 if (!--attempts) {
384 printk(KERN_INFO DRV_NAME ": outl_CSR6 too many attempts,"
385 "csr5=0x%08x\n", csr5);
386 outl(newcsr6, ioaddr + CSR6); /* unsafe but do it anyway */
387 restore_flags(flags);
388 return;
389 }
390 outl(currcsr6, ioaddr + CSR6);
391 udelay(1);
392 }
393 /* now it is safe to change csr6 */
394 outl(newcsr6, ioaddr + CSR6);
395 restore_flags(flags);
396 }
397
398
read_mac_address(struct net_device * dev)399 static void __devinit read_mac_address(struct net_device *dev)
400 {
401 long ioaddr = dev->base_addr;
402 int i, j;
403 unsigned char tuple, link, data_id, data_count;
404
405 /* Xircom has its address stored in the CIS;
406 * we access it through the boot rom interface for now
407 * this might not work, as the CIS is not parsed but I
408 * (danilo) use the offset I found on my card's CIS !!!
409 *
410 * Doug Ledford: I changed this routine around so that it
411 * walks the CIS memory space, parsing the config items, and
412 * finds the proper lan_node_id tuple and uses the data
413 * stored there.
414 */
415 outl(1 << 12, ioaddr + CSR9); /* enable boot rom access */
416 for (i = 0x100; i < 0x1f7; i += link+2) {
417 outl(i, ioaddr + CSR10);
418 tuple = inl(ioaddr + CSR9) & 0xff;
419 outl(i + 1, ioaddr + CSR10);
420 link = inl(ioaddr + CSR9) & 0xff;
421 outl(i + 2, ioaddr + CSR10);
422 data_id = inl(ioaddr + CSR9) & 0xff;
423 outl(i + 3, ioaddr + CSR10);
424 data_count = inl(ioaddr + CSR9) & 0xff;
425 if ( (tuple == 0x22) &&
426 (data_id == 0x04) && (data_count == 0x06) ) {
427 /*
428 * This is it. We have the data we want.
429 */
430 for (j = 0; j < 6; j++) {
431 outl(i + j + 4, ioaddr + CSR10);
432 dev->dev_addr[j] = inl(ioaddr + CSR9) & 0xff;
433 }
434 break;
435 } else if (link == 0) {
436 break;
437 }
438 }
439 }
440
441
442 /*
443 * locate the MII interfaces and initialize them.
444 * we disable full-duplex modes here,
445 * because we don't know how to handle them.
446 */
find_mii_transceivers(struct net_device * dev)447 static void find_mii_transceivers(struct net_device *dev)
448 {
449 struct xircom_private *tp = dev->priv;
450 int phy, phy_idx;
451
452 if (media_cap[tp->default_port] & MediaIsMII) {
453 u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
454 tp->to_advertise = media2advert[tp->default_port - 9];
455 } else
456 tp->to_advertise =
457 /*ADVERTISE_100BASE4 | ADVERTISE_100FULL |*/ ADVERTISE_100HALF |
458 /*ADVERTISE_10FULL |*/ ADVERTISE_10HALF | ADVERTISE_CSMA;
459
460 /* Find the connected MII xcvrs.
461 Doing this in open() would allow detecting external xcvrs later,
462 but takes much time. */
463 for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
464 int mii_status = mdio_read(dev, phy, MII_BMSR);
465 if ((mii_status & (BMSR_100BASE4 | BMSR_100HALF | BMSR_10HALF)) == BMSR_100BASE4 ||
466 ((mii_status & BMSR_100BASE4) == 0 &&
467 (mii_status & (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | BMSR_10HALF)) != 0)) {
468 int mii_reg0 = mdio_read(dev, phy, MII_BMCR);
469 int mii_advert = mdio_read(dev, phy, MII_ADVERTISE);
470 int reg4 = ((mii_status >> 6) & tp->to_advertise) | ADVERTISE_CSMA;
471 tp->phys[phy_idx] = phy;
472 tp->advertising[phy_idx++] = reg4;
473 printk(KERN_INFO "%s: MII transceiver #%d "
474 "config %4.4x status %4.4x advertising %4.4x.\n",
475 dev->name, phy, mii_reg0, mii_status, mii_advert);
476 }
477 }
478 tp->mii_cnt = phy_idx;
479 if (phy_idx == 0) {
480 printk(KERN_INFO "%s: ***WARNING***: No MII transceiver found!\n",
481 dev->name);
482 tp->phys[0] = 0;
483 }
484 }
485
486
487 /*
488 * To quote Arjan van de Ven:
489 * tranceiver_voodoo() enables the external UTP plug thingy.
490 * it's called voodoo as I stole this code and cannot cross-reference
491 * it with the specification.
492 * Actually it seems to go like this:
493 * - GPIO2 enables the MII itself so we can talk to it. The MII gets reset
494 * so any prior MII settings are lost.
495 * - GPIO0 enables the TP port so the MII can talk to the network.
496 * - a software reset will reset both GPIO pins.
497 * I also moved the software reset here, because doing it in xircom_up()
498 * required enabling the GPIO pins each time, which reset the MII each time.
499 * Thus we couldn't control the MII -- which sucks because we don't know
500 * how to handle full-duplex modes so we *must* disable them.
501 */
transceiver_voodoo(struct net_device * dev)502 static void transceiver_voodoo(struct net_device *dev)
503 {
504 struct xircom_private *tp = dev->priv;
505 long ioaddr = dev->base_addr;
506
507 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
508 outl(SoftwareReset, ioaddr + CSR0);
509 udelay(2);
510
511 /* Deassert reset. */
512 outl(tp->csr0, ioaddr + CSR0);
513
514 /* Reset the xcvr interface and turn on heartbeat. */
515 outl(0x0008, ioaddr + CSR15);
516 udelay(5); /* The delays are Xircom-recommended to give the
517 * chipset time to reset the actual hardware
518 * on the PCMCIA card
519 */
520 outl(0xa8050000, ioaddr + CSR15);
521 udelay(5);
522 outl(0xa00f0000, ioaddr + CSR15);
523 udelay(5);
524
525 outl_CSR6(0, ioaddr);
526 //outl_CSR6(FullDuplexBit, ioaddr);
527 }
528
529
xircom_init_one(struct pci_dev * pdev,const struct pci_device_id * id)530 static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
531 {
532 struct net_device *dev;
533 struct xircom_private *tp;
534 static int board_idx = -1;
535 int chip_idx = id->driver_data;
536 long ioaddr;
537 int i;
538 u8 chip_rev;
539
540 /* when built into the kernel, we only print version if device is found */
541 #ifndef MODULE
542 static int printed_version;
543 if (!printed_version++)
544 printk(version);
545 #endif
546
547 //printk(KERN_INFO "xircom_init_one(%s)\n", pdev->slot_name);
548
549 board_idx++;
550
551 if (pci_enable_device(pdev))
552 return -ENODEV;
553
554 pci_set_master(pdev);
555
556 ioaddr = pci_resource_start(pdev, 0);
557 dev = alloc_etherdev(sizeof(*tp));
558 if (!dev) {
559 printk (KERN_ERR DRV_NAME "%d: cannot alloc etherdev, aborting\n", board_idx);
560 return -ENOMEM;
561 }
562 SET_MODULE_OWNER(dev);
563
564 dev->base_addr = ioaddr;
565 dev->irq = pdev->irq;
566
567 if (pci_request_regions(pdev, dev->name)) {
568 printk (KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", board_idx);
569 goto err_out_free_netdev;
570 }
571
572 /* Bring the chip out of sleep mode.
573 Caution: Snooze mode does not work with some boards! */
574 if (xircom_tbl[chip_idx].flags & HAS_ACPI)
575 pci_write_config_dword(pdev, PCI_POWERMGMT, 0);
576
577 /* Stop the chip's Tx and Rx processes. */
578 outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
579 /* Clear the missed-packet counter. */
580 (volatile int)inl(ioaddr + CSR8);
581
582 tp = dev->priv;
583
584 tp->lock = SPIN_LOCK_UNLOCKED;
585 tp->pdev = pdev;
586 tp->chip_id = chip_idx;
587 /* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles. */
588 /* XXX: is this necessary for Xircom? */
589 tp->csr0 = csr0 & ~EnableMWI;
590
591 pci_set_drvdata(pdev, dev);
592
593 /* The lower four bits are the media type. */
594 if (board_idx >= 0 && board_idx < MAX_UNITS) {
595 tp->default_port = options[board_idx] & 15;
596 if ((options[board_idx] & 0x90) || full_duplex[board_idx] > 0)
597 tp->full_duplex = 1;
598 if (mtu[board_idx] > 0)
599 dev->mtu = mtu[board_idx];
600 }
601 if (dev->mem_start)
602 tp->default_port = dev->mem_start;
603 if (tp->default_port) {
604 if (media_cap[tp->default_port] & MediaAlwaysFD)
605 tp->full_duplex = 1;
606 }
607 if (tp->full_duplex)
608 tp->autoneg = 0;
609 else
610 tp->autoneg = 1;
611 tp->speed100 = 1;
612
613 /* The Xircom-specific entries in the device structure. */
614 dev->open = &xircom_open;
615 dev->hard_start_xmit = &xircom_start_xmit;
616 dev->stop = &xircom_close;
617 dev->get_stats = &xircom_get_stats;
618 dev->do_ioctl = &xircom_ioctl;
619 #ifdef HAVE_MULTICAST
620 dev->set_multicast_list = &set_rx_mode;
621 #endif
622 dev->tx_timeout = xircom_tx_timeout;
623 dev->watchdog_timeo = TX_TIMEOUT;
624
625 transceiver_voodoo(dev);
626
627 read_mac_address(dev);
628
629 if (register_netdev(dev))
630 goto err_out_cleardev;
631
632 pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
633 printk(KERN_INFO "%s: %s rev %d at %#3lx,",
634 dev->name, xircom_tbl[chip_idx].chip_name, chip_rev, ioaddr);
635 for (i = 0; i < 6; i++)
636 printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
637 printk(", IRQ %d.\n", dev->irq);
638
639 if (xircom_tbl[chip_idx].flags & HAS_MII) {
640 find_mii_transceivers(dev);
641 check_duplex(dev);
642 }
643
644 return 0;
645
646 err_out_cleardev:
647 pci_set_drvdata(pdev, NULL);
648 pci_release_regions(pdev);
649 err_out_free_netdev:
650 unregister_netdev(dev);
651 kfree(dev);
652 return -ENODEV;
653 }
654
655
656 /* MII transceiver control section.
657 Read and write the MII registers using software-generated serial
658 MDIO protocol. See the MII specifications or DP83840A data sheet
659 for details. */
660
661 /* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
662 met by back-to-back PCI I/O cycles, but we insert a delay to avoid
663 "overclocking" issues or future 66Mhz PCI. */
664 #define mdio_delay() inl(mdio_addr)
665
666 /* Read and write the MII registers using software-generated serial
667 MDIO protocol. It is just different enough from the EEPROM protocol
668 to not share code. The maxium data clock rate is 2.5 Mhz. */
669 #define MDIO_SHIFT_CLK 0x10000
670 #define MDIO_DATA_WRITE0 0x00000
671 #define MDIO_DATA_WRITE1 0x20000
672 #define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */
673 #define MDIO_ENB_IN 0x40000
674 #define MDIO_DATA_READ 0x80000
675
mdio_read(struct net_device * dev,int phy_id,int location)676 static int mdio_read(struct net_device *dev, int phy_id, int location)
677 {
678 int i;
679 int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
680 int retval = 0;
681 long ioaddr = dev->base_addr;
682 long mdio_addr = ioaddr + CSR9;
683
684 /* Establish sync by sending at least 32 logic ones. */
685 for (i = 32; i >= 0; i--) {
686 outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
687 mdio_delay();
688 outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
689 mdio_delay();
690 }
691 /* Shift the read command bits out. */
692 for (i = 15; i >= 0; i--) {
693 int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
694
695 outl(MDIO_ENB | dataval, mdio_addr);
696 mdio_delay();
697 outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
698 mdio_delay();
699 }
700 /* Read the two transition, 16 data, and wire-idle bits. */
701 for (i = 19; i > 0; i--) {
702 outl(MDIO_ENB_IN, mdio_addr);
703 mdio_delay();
704 retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
705 outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
706 mdio_delay();
707 }
708 return (retval>>1) & 0xffff;
709 }
710
711
mdio_write(struct net_device * dev,int phy_id,int location,int value)712 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
713 {
714 int i;
715 int cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
716 long ioaddr = dev->base_addr;
717 long mdio_addr = ioaddr + CSR9;
718
719 /* Establish sync by sending 32 logic ones. */
720 for (i = 32; i >= 0; i--) {
721 outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
722 mdio_delay();
723 outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
724 mdio_delay();
725 }
726 /* Shift the command bits out. */
727 for (i = 31; i >= 0; i--) {
728 int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
729 outl(MDIO_ENB | dataval, mdio_addr);
730 mdio_delay();
731 outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
732 mdio_delay();
733 }
734 /* Clear out extra bits. */
735 for (i = 2; i > 0; i--) {
736 outl(MDIO_ENB_IN, mdio_addr);
737 mdio_delay();
738 outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
739 mdio_delay();
740 }
741 return;
742 }
743
744
745 static void
xircom_up(struct net_device * dev)746 xircom_up(struct net_device *dev)
747 {
748 struct xircom_private *tp = dev->priv;
749 long ioaddr = dev->base_addr;
750 int i;
751
752 /* Clear the tx ring */
753 for (i = 0; i < TX_RING_SIZE; i++) {
754 tp->tx_skbuff[i] = 0;
755 tp->tx_ring[i].status = 0;
756 }
757
758 if (xircom_debug > 1)
759 printk(KERN_DEBUG "%s: xircom_up() irq %d.\n", dev->name, dev->irq);
760
761 outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
762 outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);
763
764 tp->saved_if_port = dev->if_port;
765 if (dev->if_port == 0)
766 dev->if_port = tp->default_port;
767
768 tp->csr6 = TxThresh10 /*| FullDuplexBit*/; /* XXX: why 10 and not 100? */
769
770 set_rx_mode(dev);
771
772 /* Start the chip's Tx to process setup frame. */
773 outl_CSR6(tp->csr6, ioaddr);
774 outl_CSR6(tp->csr6 | EnableTx, ioaddr);
775
776 /* Acknowledge all outstanding interrupts sources */
777 outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
778 /* Enable interrupts by setting the interrupt mask. */
779 outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
780 /* Enable Rx */
781 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
782 /* Rx poll demand */
783 outl(0, ioaddr + CSR2);
784
785 /* Tell the net layer we're ready */
786 netif_start_queue (dev);
787
788 if (xircom_debug > 2) {
789 printk(KERN_DEBUG "%s: Done xircom_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
790 dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
791 inl(ioaddr + CSR6));
792 }
793 }
794
795
796 static int
xircom_open(struct net_device * dev)797 xircom_open(struct net_device *dev)
798 {
799 struct xircom_private *tp = dev->priv;
800
801 if (request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ, dev->name, dev))
802 return -EAGAIN;
803
804 xircom_init_ring(dev);
805
806 xircom_up(dev);
807 tp->open = 1;
808
809 return 0;
810 }
811
812
xircom_tx_timeout(struct net_device * dev)813 static void xircom_tx_timeout(struct net_device *dev)
814 {
815 struct xircom_private *tp = dev->priv;
816 long ioaddr = dev->base_addr;
817
818 if (media_cap[dev->if_port] & MediaIsMII) {
819 /* Do nothing -- the media monitor should handle this. */
820 if (xircom_debug > 1)
821 printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
822 dev->name);
823 }
824
825 #if defined(way_too_many_messages)
826 if (xircom_debug > 3) {
827 int i;
828 for (i = 0; i < RX_RING_SIZE; i++) {
829 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
830 int j;
831 printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
832 "%2.2x %2.2x %2.2x.\n",
833 i, (unsigned int)tp->rx_ring[i].status,
834 (unsigned int)tp->rx_ring[i].length,
835 (unsigned int)tp->rx_ring[i].buffer1,
836 (unsigned int)tp->rx_ring[i].buffer2,
837 buf[0], buf[1], buf[2]);
838 for (j = 0; buf[j] != 0xee && j < 1600; j++)
839 if (j < 100) printk(" %2.2x", buf[j]);
840 printk(" j=%d.\n", j);
841 }
842 printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
843 for (i = 0; i < RX_RING_SIZE; i++)
844 printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
845 printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
846 for (i = 0; i < TX_RING_SIZE; i++)
847 printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
848 printk("\n");
849 }
850 #endif
851
852 /* Stop and restart the chip's Tx/Rx processes . */
853 outl_CSR6(tp->csr6 | EnableRx, ioaddr);
854 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
855 /* Trigger an immediate transmit demand. */
856 outl(0, ioaddr + CSR1);
857
858 dev->trans_start = jiffies;
859 netif_wake_queue (dev);
860 tp->stats.tx_errors++;
861 }
862
863
864 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
xircom_init_ring(struct net_device * dev)865 static void xircom_init_ring(struct net_device *dev)
866 {
867 struct xircom_private *tp = dev->priv;
868 int i;
869
870 tp->tx_full = 0;
871 tp->cur_rx = tp->cur_tx = 0;
872 tp->dirty_rx = tp->dirty_tx = 0;
873
874 for (i = 0; i < RX_RING_SIZE; i++) {
875 tp->rx_ring[i].status = 0;
876 tp->rx_ring[i].length = PKT_BUF_SZ;
877 tp->rx_ring[i].buffer2 = virt_to_bus(&tp->rx_ring[i+1]);
878 tp->rx_skbuff[i] = NULL;
879 }
880 /* Mark the last entry as wrapping the ring. */
881 tp->rx_ring[i-1].length = PKT_BUF_SZ | Rx1RingWrap;
882 tp->rx_ring[i-1].buffer2 = virt_to_bus(&tp->rx_ring[0]);
883
884 for (i = 0; i < RX_RING_SIZE; i++) {
885 /* Note the receive buffer must be longword aligned.
886 dev_alloc_skb() provides 16 byte alignment. But do *not*
887 use skb_reserve() to align the IP header! */
888 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
889 tp->rx_skbuff[i] = skb;
890 if (skb == NULL)
891 break;
892 skb->dev = dev; /* Mark as being used by this device. */
893 tp->rx_ring[i].status = Rx0DescOwned; /* Owned by Xircom chip */
894 tp->rx_ring[i].buffer1 = virt_to_bus(skb->tail);
895 }
896 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
897
898 /* The Tx buffer descriptor is filled in as needed, but we
899 do need to clear the ownership bit. */
900 for (i = 0; i < TX_RING_SIZE; i++) {
901 tp->tx_skbuff[i] = 0;
902 tp->tx_ring[i].status = 0;
903 tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
904 #ifdef CARDBUS
905 if (tp->chip_id == X3201_3)
906 tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ);
907 #endif /* CARDBUS */
908 }
909 tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]);
910 }
911
912
913 static int
xircom_start_xmit(struct sk_buff * skb,struct net_device * dev)914 xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
915 {
916 struct xircom_private *tp = dev->priv;
917 int entry;
918 u32 flag;
919
920 /* Caution: the write order is important here, set the base address
921 with the "ownership" bits last. */
922
923 /* Calculate the next Tx descriptor entry. */
924 entry = tp->cur_tx % TX_RING_SIZE;
925
926 /* Seems to be needed even though the docs disagree */
927 if(skb->len < ETH_ZLEN)
928 {
929 skb = skb_padto(skb, ETH_ZLEN);
930 if(skb == NULL)
931 return 0;
932 }
933
934 tp->tx_skbuff[entry] = skb;
935 #ifdef CARDBUS
936 if (tp->chip_id == X3201_3) {
937 memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len);
938 tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
939 } else
940 #endif
941 tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
942
943 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
944 flag = Tx1WholePkt; /* No interrupt */
945 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
946 flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
947 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
948 flag = Tx1WholePkt; /* No Tx-done intr. */
949 } else {
950 /* Leave room for set_rx_mode() to fill entries. */
951 flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
952 tp->tx_full = 1;
953 }
954 if (entry == TX_RING_SIZE - 1)
955 flag |= Tx1WholePkt | Tx1ComplIntr | Tx1RingWrap;
956
957 tp->tx_ring[entry].length = skb->len | flag;
958 tp->tx_ring[entry].status = Tx0DescOwned; /* Pass ownership to the chip. */
959 tp->cur_tx++;
960 if (tp->tx_full)
961 netif_stop_queue (dev);
962 else
963 netif_wake_queue (dev);
964
965 /* Trigger an immediate transmit demand. */
966 outl(0, dev->base_addr + CSR1);
967
968 dev->trans_start = jiffies;
969
970 return 0;
971 }
972
973
xircom_media_change(struct net_device * dev)974 static void xircom_media_change(struct net_device *dev)
975 {
976 struct xircom_private *tp = dev->priv;
977 long ioaddr = dev->base_addr;
978 u16 reg0, reg1, reg4, reg5;
979 u32 csr6 = inl(ioaddr + CSR6), newcsr6;
980
981 /* reset status first */
982 mdio_read(dev, tp->phys[0], MII_BMCR);
983 mdio_read(dev, tp->phys[0], MII_BMSR);
984
985 reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
986 reg1 = mdio_read(dev, tp->phys[0], MII_BMSR);
987
988 if (reg1 & BMSR_LSTATUS) {
989 /* link is up */
990 if (reg0 & BMCR_ANENABLE) {
991 /* autonegotiation is enabled */
992 reg4 = mdio_read(dev, tp->phys[0], MII_ADVERTISE);
993 reg5 = mdio_read(dev, tp->phys[0], MII_LPA);
994 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
995 tp->speed100 = 1;
996 tp->full_duplex = 1;
997 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
998 tp->speed100 = 1;
999 tp->full_duplex = 0;
1000 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1001 tp->speed100 = 0;
1002 tp->full_duplex = 1;
1003 } else {
1004 tp->speed100 = 0;
1005 tp->full_duplex = 0;
1006 }
1007 } else {
1008 /* autonegotiation is disabled */
1009 if (reg0 & BMCR_SPEED100)
1010 tp->speed100 = 1;
1011 else
1012 tp->speed100 = 0;
1013 if (reg0 & BMCR_FULLDPLX)
1014 tp->full_duplex = 1;
1015 else
1016 tp->full_duplex = 0;
1017 }
1018 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1019 dev->name,
1020 tp->speed100 ? "100" : "10",
1021 tp->full_duplex ? "full" : "half");
1022 newcsr6 = csr6 & ~FullDuplexBit;
1023 if (tp->full_duplex)
1024 newcsr6 |= FullDuplexBit;
1025 if (newcsr6 != csr6)
1026 outl_CSR6(newcsr6, ioaddr + CSR6);
1027 } else {
1028 printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1029 }
1030 }
1031
1032
check_duplex(struct net_device * dev)1033 static void check_duplex(struct net_device *dev)
1034 {
1035 struct xircom_private *tp = dev->priv;
1036 u16 reg0;
1037
1038 mdio_write(dev, tp->phys[0], MII_BMCR, BMCR_RESET);
1039 udelay(500);
1040 while (mdio_read(dev, tp->phys[0], MII_BMCR) & BMCR_RESET);
1041
1042 reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
1043 mdio_write(dev, tp->phys[0], MII_ADVERTISE, tp->advertising[0]);
1044
1045 if (tp->autoneg) {
1046 reg0 &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
1047 reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
1048 } else {
1049 reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1050 if (tp->speed100)
1051 reg0 |= BMCR_SPEED100;
1052 if (tp->full_duplex)
1053 reg0 |= BMCR_FULLDPLX;
1054 printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1055 dev->name,
1056 tp->speed100 ? "100" : "10",
1057 tp->full_duplex ? "full" : "half");
1058 }
1059 mdio_write(dev, tp->phys[0], MII_BMCR, reg0);
1060 }
1061
1062
1063 /* The interrupt handler does all of the Rx thread work and cleans up
1064 after the Tx thread. */
xircom_interrupt(int irq,void * dev_instance,struct pt_regs * regs)1065 static void xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1066 {
1067 struct net_device *dev = dev_instance;
1068 struct xircom_private *tp = dev->priv;
1069 long ioaddr = dev->base_addr;
1070 int csr5, work_budget = max_interrupt_work;
1071
1072 spin_lock (&tp->lock);
1073
1074 do {
1075 csr5 = inl(ioaddr + CSR5);
1076 /* Acknowledge all of the current interrupt sources ASAP. */
1077 outl(csr5 & 0x0001ffff, ioaddr + CSR5);
1078
1079 if (xircom_debug > 4)
1080 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
1081 dev->name, csr5, inl(dev->base_addr + CSR5));
1082
1083 if (csr5 == 0xffffffff)
1084 break; /* all bits set, assume PCMCIA card removed */
1085
1086 if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
1087 break;
1088
1089 if (csr5 & (RxIntr | RxNoBuf))
1090 work_budget -= xircom_rx(dev);
1091
1092 if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
1093 unsigned int dirty_tx;
1094
1095 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
1096 dirty_tx++) {
1097 int entry = dirty_tx % TX_RING_SIZE;
1098 int status = tp->tx_ring[entry].status;
1099
1100 if (status < 0)
1101 break; /* It still hasn't been Txed */
1102 /* Check for Rx filter setup frames. */
1103 if (tp->tx_skbuff[entry] == NULL)
1104 continue;
1105
1106 if (status & Tx0DescError) {
1107 /* There was an major error, log it. */
1108 #ifndef final_version
1109 if (xircom_debug > 1)
1110 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1111 dev->name, status);
1112 #endif
1113 tp->stats.tx_errors++;
1114 if (status & Tx0ManyColl) {
1115 tp->stats.tx_aborted_errors++;
1116 }
1117 if (status & Tx0NoCarrier) tp->stats.tx_carrier_errors++;
1118 if (status & Tx0LateColl) tp->stats.tx_window_errors++;
1119 if (status & Tx0Underflow) tp->stats.tx_fifo_errors++;
1120 } else {
1121 tp->stats.tx_bytes += tp->tx_ring[entry].length & 0x7ff;
1122 tp->stats.collisions += (status >> 3) & 15;
1123 tp->stats.tx_packets++;
1124 }
1125
1126 /* Free the original skb. */
1127 dev_kfree_skb_irq(tp->tx_skbuff[entry]);
1128 tp->tx_skbuff[entry] = 0;
1129 }
1130
1131 #ifndef final_version
1132 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
1133 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1134 dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
1135 dirty_tx += TX_RING_SIZE;
1136 }
1137 #endif
1138
1139 if (tp->tx_full &&
1140 tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
1141 /* The ring is no longer full */
1142 tp->tx_full = 0;
1143
1144 if (tp->tx_full)
1145 netif_stop_queue (dev);
1146 else
1147 netif_wake_queue (dev);
1148
1149 tp->dirty_tx = dirty_tx;
1150 if (csr5 & TxDied) {
1151 if (xircom_debug > 2)
1152 printk(KERN_WARNING "%s: The transmitter stopped."
1153 " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
1154 dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
1155 outl_CSR6(tp->csr6 | EnableRx, ioaddr);
1156 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
1157 }
1158 }
1159
1160 /* Log errors. */
1161 if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
1162 if (csr5 & LinkChange)
1163 xircom_media_change(dev);
1164 if (csr5 & TxFIFOUnderflow) {
1165 if ((tp->csr6 & TxThreshMask) != TxThreshMask)
1166 tp->csr6 += (1 << TxThreshShift); /* Bump up the Tx threshold */
1167 else
1168 tp->csr6 |= TxStoreForw; /* Store-n-forward. */
1169 /* Restart the transmit process. */
1170 outl_CSR6(tp->csr6 | EnableRx, ioaddr);
1171 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
1172 }
1173 if (csr5 & RxDied) { /* Missed a Rx frame. */
1174 tp->stats.rx_errors++;
1175 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1176 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
1177 }
1178 /* Clear all error sources, included undocumented ones! */
1179 outl(0x0800f7ba, ioaddr + CSR5);
1180 }
1181 if (--work_budget < 0) {
1182 if (xircom_debug > 1)
1183 printk(KERN_WARNING "%s: Too much work during an interrupt, "
1184 "csr5=0x%8.8x.\n", dev->name, csr5);
1185 /* Acknowledge all interrupt sources. */
1186 outl(0x8001ffff, ioaddr + CSR5);
1187 break;
1188 }
1189 } while (1);
1190
1191 if (xircom_debug > 3)
1192 printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
1193 dev->name, inl(ioaddr + CSR5));
1194
1195 spin_unlock (&tp->lock);
1196 }
1197
1198
1199 static int
xircom_rx(struct net_device * dev)1200 xircom_rx(struct net_device *dev)
1201 {
1202 struct xircom_private *tp = dev->priv;
1203 int entry = tp->cur_rx % RX_RING_SIZE;
1204 int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
1205 int work_done = 0;
1206
1207 if (xircom_debug > 4)
1208 printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
1209 tp->rx_ring[entry].status);
1210 /* If we own the next entry, it's a new packet. Send it up. */
1211 while (tp->rx_ring[entry].status >= 0) {
1212 s32 status = tp->rx_ring[entry].status;
1213
1214 if (xircom_debug > 5)
1215 printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
1216 tp->rx_ring[entry].status);
1217 if (--rx_work_limit < 0)
1218 break;
1219 if ((status & 0x38008300) != 0x0300) {
1220 if ((status & 0x38000300) != 0x0300) {
1221 /* Ignore earlier buffers. */
1222 if ((status & 0xffff) != 0x7fff) {
1223 if (xircom_debug > 1)
1224 printk(KERN_WARNING "%s: Oversized Ethernet frame "
1225 "spanned multiple buffers, status %8.8x!\n",
1226 dev->name, status);
1227 tp->stats.rx_length_errors++;
1228 }
1229 } else if (status & Rx0DescError) {
1230 /* There was a fatal error. */
1231 if (xircom_debug > 2)
1232 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
1233 dev->name, status);
1234 tp->stats.rx_errors++; /* end of a packet.*/
1235 if (status & (Rx0Runt | Rx0HugeFrame)) tp->stats.rx_length_errors++;
1236 if (status & Rx0CRCError) tp->stats.rx_crc_errors++;
1237 }
1238 } else {
1239 /* Omit the four octet CRC from the length. */
1240 short pkt_len = ((status >> 16) & 0x7ff) - 4;
1241 struct sk_buff *skb;
1242
1243 #ifndef final_version
1244 if (pkt_len > 1518) {
1245 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
1246 dev->name, pkt_len, pkt_len);
1247 pkt_len = 1518;
1248 tp->stats.rx_length_errors++;
1249 }
1250 #endif
1251 /* Check if the packet is long enough to accept without copying
1252 to a minimally-sized skbuff. */
1253 if (pkt_len < rx_copybreak
1254 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1255 skb->dev = dev;
1256 skb_reserve(skb, 2); /* 16 byte align the IP header */
1257 #if ! defined(__alpha__)
1258 eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
1259 pkt_len, 0);
1260 skb_put(skb, pkt_len);
1261 #else
1262 memcpy(skb_put(skb, pkt_len),
1263 bus_to_virt(tp->rx_ring[entry].buffer1), pkt_len);
1264 #endif
1265 work_done++;
1266 } else { /* Pass up the skb already on the Rx ring. */
1267 skb_put(skb = tp->rx_skbuff[entry], pkt_len);
1268 tp->rx_skbuff[entry] = NULL;
1269 }
1270 skb->protocol = eth_type_trans(skb, dev);
1271 netif_rx(skb);
1272 dev->last_rx = jiffies;
1273 tp->stats.rx_packets++;
1274 tp->stats.rx_bytes += pkt_len;
1275 }
1276 entry = (++tp->cur_rx) % RX_RING_SIZE;
1277 }
1278
1279 /* Refill the Rx ring buffers. */
1280 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
1281 entry = tp->dirty_rx % RX_RING_SIZE;
1282 if (tp->rx_skbuff[entry] == NULL) {
1283 struct sk_buff *skb;
1284 skb = tp->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
1285 if (skb == NULL)
1286 break;
1287 skb->dev = dev; /* Mark as being used by this device. */
1288 tp->rx_ring[entry].buffer1 = virt_to_bus(skb->tail);
1289 work_done++;
1290 }
1291 tp->rx_ring[entry].status = Rx0DescOwned;
1292 }
1293
1294 return work_done;
1295 }
1296
1297
1298 static void
xircom_down(struct net_device * dev)1299 xircom_down(struct net_device *dev)
1300 {
1301 long ioaddr = dev->base_addr;
1302 struct xircom_private *tp = dev->priv;
1303
1304 /* Disable interrupts by clearing the interrupt mask. */
1305 outl(0, ioaddr + CSR7);
1306 /* Stop the chip's Tx and Rx processes. */
1307 outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
1308
1309 if (inl(ioaddr + CSR6) != 0xffffffff)
1310 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1311
1312 dev->if_port = tp->saved_if_port;
1313 }
1314
1315
1316 static int
xircom_close(struct net_device * dev)1317 xircom_close(struct net_device *dev)
1318 {
1319 long ioaddr = dev->base_addr;
1320 struct xircom_private *tp = dev->priv;
1321 int i;
1322
1323 if (xircom_debug > 1)
1324 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1325 dev->name, inl(ioaddr + CSR5));
1326
1327 netif_stop_queue(dev);
1328
1329 if (netif_device_present(dev))
1330 xircom_down(dev);
1331
1332 free_irq(dev->irq, dev);
1333
1334 /* Free all the skbuffs in the Rx queue. */
1335 for (i = 0; i < RX_RING_SIZE; i++) {
1336 struct sk_buff *skb = tp->rx_skbuff[i];
1337 tp->rx_skbuff[i] = 0;
1338 tp->rx_ring[i].status = 0; /* Not owned by Xircom chip. */
1339 tp->rx_ring[i].length = 0;
1340 tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
1341 if (skb) {
1342 dev_kfree_skb(skb);
1343 }
1344 }
1345 for (i = 0; i < TX_RING_SIZE; i++) {
1346 if (tp->tx_skbuff[i])
1347 dev_kfree_skb(tp->tx_skbuff[i]);
1348 tp->tx_skbuff[i] = 0;
1349 }
1350
1351 tp->open = 0;
1352 return 0;
1353 }
1354
1355
xircom_get_stats(struct net_device * dev)1356 static struct net_device_stats *xircom_get_stats(struct net_device *dev)
1357 {
1358 struct xircom_private *tp = dev->priv;
1359 long ioaddr = dev->base_addr;
1360
1361 if (netif_device_present(dev))
1362 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1363
1364 return &tp->stats;
1365 }
1366
1367
xircom_ethtool_ioctl(struct net_device * dev,void * useraddr)1368 static int xircom_ethtool_ioctl(struct net_device *dev, void *useraddr)
1369 {
1370 struct ethtool_cmd ecmd;
1371 struct xircom_private *tp = dev->priv;
1372
1373 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
1374 return -EFAULT;
1375
1376 switch (ecmd.cmd) {
1377 case ETHTOOL_GSET:
1378 ecmd.supported =
1379 SUPPORTED_10baseT_Half |
1380 SUPPORTED_10baseT_Full |
1381 SUPPORTED_100baseT_Half |
1382 SUPPORTED_100baseT_Full |
1383 SUPPORTED_Autoneg |
1384 SUPPORTED_MII;
1385
1386 ecmd.advertising = ADVERTISED_MII;
1387 if (tp->advertising[0] & ADVERTISE_10HALF)
1388 ecmd.advertising |= ADVERTISED_10baseT_Half;
1389 if (tp->advertising[0] & ADVERTISE_10FULL)
1390 ecmd.advertising |= ADVERTISED_10baseT_Full;
1391 if (tp->advertising[0] & ADVERTISE_100HALF)
1392 ecmd.advertising |= ADVERTISED_100baseT_Half;
1393 if (tp->advertising[0] & ADVERTISE_100FULL)
1394 ecmd.advertising |= ADVERTISED_100baseT_Full;
1395 if (tp->autoneg) {
1396 ecmd.advertising |= ADVERTISED_Autoneg;
1397 ecmd.autoneg = AUTONEG_ENABLE;
1398 } else
1399 ecmd.autoneg = AUTONEG_DISABLE;
1400
1401 ecmd.port = PORT_MII;
1402 ecmd.transceiver = XCVR_INTERNAL;
1403 ecmd.phy_address = tp->phys[0];
1404 ecmd.speed = tp->speed100 ? SPEED_100 : SPEED_10;
1405 ecmd.duplex = tp->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1406 ecmd.maxtxpkt = TX_RING_SIZE / 2;
1407 ecmd.maxrxpkt = 0;
1408
1409 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
1410 return -EFAULT;
1411 return 0;
1412
1413 case ETHTOOL_SSET: {
1414 u16 autoneg, speed100, full_duplex;
1415
1416 autoneg = (ecmd.autoneg == AUTONEG_ENABLE);
1417 speed100 = (ecmd.speed == SPEED_100);
1418 full_duplex = (ecmd.duplex == DUPLEX_FULL);
1419
1420 tp->autoneg = autoneg;
1421 if (speed100 != tp->speed100 ||
1422 full_duplex != tp->full_duplex) {
1423 tp->speed100 = speed100;
1424 tp->full_duplex = full_duplex;
1425 /* change advertising bits */
1426 tp->advertising[0] &= ~(ADVERTISE_10HALF |
1427 ADVERTISE_10FULL |
1428 ADVERTISE_100HALF |
1429 ADVERTISE_100FULL |
1430 ADVERTISE_100BASE4);
1431 if (speed100) {
1432 if (full_duplex)
1433 tp->advertising[0] |= ADVERTISE_100FULL;
1434 else
1435 tp->advertising[0] |= ADVERTISE_100HALF;
1436 } else {
1437 if (full_duplex)
1438 tp->advertising[0] |= ADVERTISE_10FULL;
1439 else
1440 tp->advertising[0] |= ADVERTISE_10HALF;
1441 }
1442 }
1443 check_duplex(dev);
1444 return 0;
1445 }
1446
1447 case ETHTOOL_GDRVINFO: {
1448 struct ethtool_drvinfo info;
1449 memset(&info, 0, sizeof(info));
1450 info.cmd = ecmd.cmd;
1451 strcpy(info.driver, DRV_NAME);
1452 strcpy(info.version, DRV_VERSION);
1453 *info.fw_version = 0;
1454 strcpy(info.bus_info, tp->pdev->slot_name);
1455 if (copy_to_user(useraddr, &info, sizeof(info)))
1456 return -EFAULT;
1457 return 0;
1458 }
1459
1460 default:
1461 return -EOPNOTSUPP;
1462 }
1463 }
1464
1465
1466 /* Provide ioctl() calls to examine the MII xcvr state. */
xircom_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)1467 static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1468 {
1469 struct xircom_private *tp = dev->priv;
1470 u16 *data = (u16 *)&rq->ifr_data;
1471 int phy = tp->phys[0] & 0x1f;
1472 unsigned long flags;
1473
1474 switch(cmd) {
1475 case SIOCETHTOOL:
1476 return xircom_ethtool_ioctl(dev, (void *) rq->ifr_data);
1477
1478 /* Legacy mii-diag interface */
1479 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1480 case SIOCDEVPRIVATE: /* for binary compat, remove in 2.5 */
1481 if (tp->mii_cnt)
1482 data[0] = phy;
1483 else
1484 return -ENODEV;
1485 return 0;
1486 case SIOCGMIIREG: /* Read MII PHY register. */
1487 case SIOCDEVPRIVATE+1: /* for binary compat, remove in 2.5 */
1488 save_flags(flags);
1489 cli();
1490 data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
1491 restore_flags(flags);
1492 return 0;
1493 case SIOCSMIIREG: /* Write MII PHY register. */
1494 case SIOCDEVPRIVATE+2: /* for binary compat, remove in 2.5 */
1495 if (!capable(CAP_NET_ADMIN))
1496 return -EPERM;
1497 save_flags(flags);
1498 cli();
1499 if (data[0] == tp->phys[0]) {
1500 u16 value = data[2];
1501 switch (data[1]) {
1502 case 0:
1503 if (value & (BMCR_RESET | BMCR_ANENABLE))
1504 /* Autonegotiation. */
1505 tp->autoneg = 1;
1506 else {
1507 tp->full_duplex = (value & BMCR_FULLDPLX) ? 1 : 0;
1508 tp->autoneg = 0;
1509 }
1510 break;
1511 case 4:
1512 tp->advertising[0] = value;
1513 break;
1514 }
1515 check_duplex(dev);
1516 }
1517 mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
1518 restore_flags(flags);
1519 return 0;
1520 default:
1521 return -EOPNOTSUPP;
1522 }
1523
1524 return -EOPNOTSUPP;
1525 }
1526
1527 /* Set or clear the multicast filter for this adaptor.
1528 Note that we only use exclusion around actually queueing the
1529 new frame, not around filling tp->setup_frame. This is non-deterministic
1530 when re-entered but still correct. */
set_rx_mode(struct net_device * dev)1531 static void set_rx_mode(struct net_device *dev)
1532 {
1533 struct xircom_private *tp = dev->priv;
1534 struct dev_mc_list *mclist;
1535 long ioaddr = dev->base_addr;
1536 int csr6 = inl(ioaddr + CSR6);
1537 u16 *eaddrs, *setup_frm;
1538 u32 tx_flags;
1539 int i;
1540
1541 tp->csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
1542 csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
1543 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1544 tp->csr6 |= PromiscBit;
1545 csr6 |= PromiscBit;
1546 goto out;
1547 }
1548
1549 if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
1550 /* Too many to filter well -- accept all multicasts. */
1551 tp->csr6 |= AllMultiBit;
1552 csr6 |= AllMultiBit;
1553 goto out;
1554 }
1555
1556 tx_flags = Tx1WholePkt | Tx1SetupPkt | PKT_SETUP_SZ;
1557
1558 /* Note that only the low-address shortword of setup_frame is valid! */
1559 setup_frm = tp->setup_frame;
1560 mclist = dev->mc_list;
1561
1562 /* Fill the first entry with our physical address. */
1563 eaddrs = (u16 *)dev->dev_addr;
1564 *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
1565 *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
1566 *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
1567
1568 if (dev->mc_count > 14) { /* Must use a multicast hash table. */
1569 u32 *hash_table = (u32 *)(tp->setup_frame + 4 * 12);
1570 u32 hash, hash2;
1571
1572 tx_flags |= Tx1HashSetup;
1573 tp->csr6 |= HashFilterBit;
1574 csr6 |= HashFilterBit;
1575
1576 /* Fill the unused 3 entries with the broadcast address.
1577 At least one entry *must* contain the broadcast address!!!*/
1578 for (i = 0; i < 3; i++) {
1579 *setup_frm = 0xffff; setup_frm += 2;
1580 *setup_frm = 0xffff; setup_frm += 2;
1581 *setup_frm = 0xffff; setup_frm += 2;
1582 }
1583
1584 /* Truly brain-damaged hash filter layout */
1585 /* XXX: not sure if I should take the last or the first 9 bits */
1586 for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
1587 u32 *hptr;
1588 hash = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
1589 if (hash < 384) {
1590 hash2 = hash + ((hash >> 4) << 4) +
1591 ((hash >> 5) << 5);
1592 } else {
1593 hash -= 384;
1594 hash2 = 64 + hash + (hash >> 4) * 80;
1595 }
1596 hptr = &hash_table[hash2 & ~0x1f];
1597 *hptr |= cpu_to_le32(1 << (hash2 & 0x1f));
1598 }
1599 } else {
1600 /* We have <= 14 mcast addresses so we can use Xircom's
1601 wonderful 16-address perfect filter. */
1602 for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
1603 eaddrs = (u16 *)mclist->dmi_addr;
1604 *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
1605 *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
1606 *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
1607 }
1608 /* Fill the unused entries with the broadcast address.
1609 At least one entry *must* contain the broadcast address!!!*/
1610 for (; i < 15; i++) {
1611 *setup_frm = 0xffff; setup_frm += 2;
1612 *setup_frm = 0xffff; setup_frm += 2;
1613 *setup_frm = 0xffff; setup_frm += 2;
1614 }
1615 }
1616
1617 /* Now add this frame to the Tx list. */
1618 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1619 /* Same setup recently queued, we need not add it. */
1620 /* XXX: Huh? All it means is that the Tx list is full...*/
1621 } else {
1622 unsigned long flags;
1623 unsigned int entry;
1624 int dummy = -1;
1625
1626 save_flags(flags); cli();
1627 entry = tp->cur_tx++ % TX_RING_SIZE;
1628
1629 if (entry != 0) {
1630 /* Avoid a chip errata by prefixing a dummy entry. */
1631 tp->tx_skbuff[entry] = 0;
1632 tp->tx_ring[entry].length =
1633 (entry == TX_RING_SIZE - 1) ? Tx1RingWrap : 0;
1634 tp->tx_ring[entry].buffer1 = 0;
1635 /* race with chip, set Tx0DescOwned later */
1636 dummy = entry;
1637 entry = tp->cur_tx++ % TX_RING_SIZE;
1638 }
1639
1640 tp->tx_skbuff[entry] = 0;
1641 /* Put the setup frame on the Tx list. */
1642 if (entry == TX_RING_SIZE - 1)
1643 tx_flags |= Tx1RingWrap; /* Wrap ring. */
1644 tp->tx_ring[entry].length = tx_flags;
1645 tp->tx_ring[entry].buffer1 = virt_to_bus(tp->setup_frame);
1646 tp->tx_ring[entry].status = Tx0DescOwned;
1647 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
1648 tp->tx_full = 1;
1649 netif_stop_queue (dev);
1650 }
1651 if (dummy >= 0)
1652 tp->tx_ring[dummy].status = Tx0DescOwned;
1653 restore_flags(flags);
1654 /* Trigger an immediate transmit demand. */
1655 outl(0, ioaddr + CSR1);
1656 }
1657
1658 out:
1659 outl_CSR6(csr6, ioaddr);
1660 }
1661
1662
1663 static struct pci_device_id xircom_pci_table[] __devinitdata = {
1664 { 0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, X3201_3 },
1665 {0},
1666 };
1667 MODULE_DEVICE_TABLE(pci, xircom_pci_table);
1668
1669
1670 #ifdef CONFIG_PM
xircom_suspend(struct pci_dev * pdev,u32 state)1671 static int xircom_suspend(struct pci_dev *pdev, u32 state)
1672 {
1673 struct net_device *dev = pci_get_drvdata(pdev);
1674 struct xircom_private *tp = dev->priv;
1675 printk(KERN_INFO "xircom_suspend(%s)\n", dev->name);
1676 if (tp->open)
1677 xircom_down(dev);
1678 return 0;
1679 }
1680
1681
xircom_resume(struct pci_dev * pdev)1682 static int xircom_resume(struct pci_dev *pdev)
1683 {
1684 struct net_device *dev = pci_get_drvdata(pdev);
1685 struct xircom_private *tp = dev->priv;
1686 printk(KERN_INFO "xircom_resume(%s)\n", dev->name);
1687
1688 /* Bring the chip out of sleep mode.
1689 Caution: Snooze mode does not work with some boards! */
1690 if (xircom_tbl[tp->chip_id].flags & HAS_ACPI)
1691 pci_write_config_dword(tp->pdev, PCI_POWERMGMT, 0);
1692
1693 transceiver_voodoo(dev);
1694 if (xircom_tbl[tp->chip_id].flags & HAS_MII)
1695 check_duplex(dev);
1696
1697 if (tp->open)
1698 xircom_up(dev);
1699 return 0;
1700 }
1701 #endif /* CONFIG_PM */
1702
1703
xircom_remove_one(struct pci_dev * pdev)1704 static void __devexit xircom_remove_one(struct pci_dev *pdev)
1705 {
1706 struct net_device *dev = pci_get_drvdata(pdev);
1707
1708 printk(KERN_INFO "xircom_remove_one(%s)\n", dev->name);
1709 unregister_netdev(dev);
1710 pci_release_regions(pdev);
1711 kfree(dev);
1712 pci_set_drvdata(pdev, NULL);
1713 }
1714
1715
1716 static struct pci_driver xircom_driver = {
1717 name: DRV_NAME,
1718 id_table: xircom_pci_table,
1719 probe: xircom_init_one,
1720 remove: __devexit_p(xircom_remove_one),
1721 #ifdef CONFIG_PM
1722 suspend: xircom_suspend,
1723 resume: xircom_resume
1724 #endif /* CONFIG_PM */
1725 };
1726
1727
xircom_init(void)1728 static int __init xircom_init(void)
1729 {
1730 /* when a module, this is printed whether or not devices are found in probe */
1731 #ifdef MODULE
1732 printk(version);
1733 #endif
1734 return pci_module_init(&xircom_driver);
1735 }
1736
1737
xircom_exit(void)1738 static void __exit xircom_exit(void)
1739 {
1740 pci_unregister_driver(&xircom_driver);
1741 }
1742
1743 module_init(xircom_init)
1744 module_exit(xircom_exit)
1745
1746 /*
1747 * Local variables:
1748 * c-indent-level: 4
1749 * c-basic-offset: 4
1750 * tab-width: 4
1751 * End:
1752 */
1753