1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
7 *
8 * Copyright (C) 1999, 2000, 2001 Ralf Baechle
9 * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
10 *
11 * References:
12 * o IOC3 ASIC specification 4.51, 1996-04-18
13 * o IEEE 802.3 specification, 2000 edition
14 * o DP38840A Specification, National Semiconductor, March 1997
15 *
16 * To do:
17 *
18 * o Handle allocation failures in ioc3_alloc_skb() more gracefully.
19 * o Handle allocation failures in ioc3_init_rings().
20 * o Use prefetching for large packets. What is a good lower limit for
21 * prefetching?
22 * o We're probably allocating a bit too much memory.
23 * o Use hardware checksums.
24 * o Convert to using a IOC3 meta driver.
 *  o Which PHYs might possibly be attached to the IOC3 in real life,
26 * which workarounds are required for them? Do we ever have Lucent's?
27 * o For the 2.5 branch kill the mii-tool ioctls.
28 */
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/delay.h>
32 #include <linux/kernel.h>
33 #include <linux/mm.h>
34 #include <linux/errno.h>
35 #include <linux/module.h>
36 #include <linux/pci.h>
37 #include <linux/crc32.h>
38
39 #ifdef CONFIG_SERIAL
40 #include <linux/serial.h>
41 #include <asm/serial.h>
42 #define IOC3_BAUD (22000000 / (3*16))
43 #define IOC3_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
44 #endif
45
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/ethtool.h>
49 #include <linux/skbuff.h>
50 #include <linux/dp83840.h>
51
52 #include <asm/byteorder.h>
53 #include <asm/io.h>
54 #include <asm/pgtable.h>
55 #include <asm/uaccess.h>
56 #include <asm/sn/types.h>
57 #include <asm/sn/sn0/addrs.h>
58 #include <asm/sn/sn0/hubni.h>
59 #include <asm/sn/sn0/hubio.h>
60 #include <asm/sn/klconfig.h>
61 #include <asm/sn/ioc3.h>
62 #include <asm/sn/sn0/ip27.h>
63 #include <asm/pci/bridge.h>
64
65 /*
66 * 64 RX buffers. This is tunable in the range of 16 <= x < 512. The
67 * value must be a power of two.
68 */
69 #define RX_BUFFS 64
70
/* Timer state engine for the link auto-negotiation watcher (ioc3_timer). */
enum ioc3_timer_state {
	arbwait  = 0,	/* Waiting for auto negotiation to complete. */
	lupwait  = 1,	/* Auto-neg complete, awaiting link-up status. */
	ltrywait = 2,	/* Forcing try of all modes, from fastest to slowest. */
	asleep   = 3,	/* Timer inactive. */
};
78
/* Private per NIC data of the driver. */
struct ioc3_private {
	struct ioc3 *regs;		/* Memory-mapped IOC3 register block. */
	int phy;			/* MII address of the PHY found by ioc3_mii_init(). */
	unsigned long *rxr;		/* pointer to receiver ring */
	struct ioc3_etxd *txr;		/* TX descriptor ring (order-2 page allocation). */
	struct sk_buff *rx_skbs[512];	/* skbs backing the 512 RX ring slots. */
	struct sk_buff *tx_skbs[128];	/* skbs awaiting TX completion, one per ring slot. */
	struct net_device_stats stats;
	int rx_ci;			/* RX consumer index */
	int rx_pi;			/* RX producer index */
	int tx_ci;			/* TX consumer index */
	int tx_pi;			/* TX producer index */
	int txqlen;			/* Packets currently queued for transmit. */
	u32 emcr, ehar_h, ehar_l;	/* Soft copies of MAC control / hash registers. */
	spinlock_t ioc3_lock;		/* Protects MII access and the TX side. */
	struct net_device *dev;		/* Back pointer to our net device. */

	/* Members used by autonegotiation */
	struct timer_list ioc3_timer;
	enum ioc3_timer_state timer_state; /* State of auto-neg timer. */
	unsigned int timer_ticks;	/* Number of clicks at each state */
	unsigned short sw_bmcr;		/* sw copy of MII config register */
	unsigned short sw_bmsr;		/* sw copy of MII status register */
	unsigned short sw_physid1;	/* sw copy of PHYSID1 */
	unsigned short sw_physid2;	/* sw copy of PHYSID2 */
	unsigned short sw_advertise;	/* sw copy of ADVERTISE */
	unsigned short sw_lpa;		/* sw copy of LPA */
	unsigned short sw_csconfig;	/* sw copy of CSCONFIG */
};
109
110 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
111 static void ioc3_set_multicast_list(struct net_device *dev);
112 static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
113 static void ioc3_timeout(struct net_device *dev);
114 static inline unsigned int ioc3_hash(const unsigned char *addr);
115 static inline void ioc3_stop(struct ioc3_private *ip);
116 static void ioc3_init(struct ioc3_private *ip);
117
118 static const char ioc3_str[] = "IOC3 Ethernet";
119
/* We use this to acquire receive skb's that we can DMA directly into.
   Evaluates to the number of bytes needed to round addr up to the next
   128-byte boundary (0 if already aligned). */
#define ALIGNED_RX_SKB_ADDR(addr) \
	((((unsigned long)(addr) + (128 - 1)) & ~(128 - 1)) - (unsigned long)(addr))

/*
 * Allocate an skb with 128 bytes of slack, then skb_reserve() whatever
 * offset is needed so skb->data ends up 128-byte aligned.  Evaluates to
 * the skb pointer, or NULL if alloc_skb() failed.
 */
#define ioc3_alloc_skb(__length, __gfp_flags) \
({	struct sk_buff *__skb; \
	__skb = alloc_skb((__length) + 128, (__gfp_flags)); \
	if (__skb) { \
		int __offset = ALIGNED_RX_SKB_ADDR(__skb->data); \
		if(__offset) \
			skb_reserve(__skb, __offset); \
	} \
	__skb; \
})
134
/* BEWARE: The IOC3 documentation documents the size of rx buffers as
   1644 while it's actually 1664.  This one was nasty to track down ... */
#define RX_OFFSET		10	/* Gap before frame data for struct ioc3_erxbuf. */
#define RX_BUF_ALLOC_SIZE	(1664 + RX_OFFSET + 128)	/* Max frame + header + align slack. */

/* DMA barrier to separate cached and uncached accesses. */
#define BARRIER() \
	__asm__("sync" ::: "memory")


/* Size of the IOC3 register window in PCI space. */
#define IOC3_SIZE 0x100000

/* Read/write an IOC3 register by struct field name.  These go through
   the memory-mapped struct ioc3; the macro form keeps call sites short. */
#define ioc3_r(reg) \
({ \
	u32 __res; \
	__res = ioc3->reg; \
	__res; \
})

#define ioc3_w(reg,val) \
do { \
	(ioc3->reg = (val)); \
} while(0)
158
159 static inline u32
mcr_pack(u32 pulse,u32 sample)160 mcr_pack(u32 pulse, u32 sample)
161 {
162 return (pulse << 10) | (sample << 2);
163 }
164
/* Spin until the current MCR operation completes; returns the sampled
   bus bit. */
static int
nic_wait(struct ioc3 *ioc3)
{
	u32 mcr;

	/* Busy-wait for the done flag (bit 1) of the MCR register. */
	do {
		mcr = ioc3_r(mcr);
	} while (!(mcr & 2));

	/* Bit 0 carries the value sampled from the one-wire bus. */
	return mcr & 1;
}
176
/* Reset the one-wire bus and report whether a device answered.
   Returns the bit sampled after the reset pulse (used as a presence
   indication by the callers). */
static int
nic_reset(struct ioc3 *ioc3)
{
	int presence;

	/* Long reset pulse, then sample the bus for a device response. */
	ioc3_w(mcr, mcr_pack(500, 65));
	presence = nic_wait(ioc3);

	/* Idle period before the next bus operation. */
	ioc3_w(mcr, mcr_pack(0, 500));
	nic_wait(ioc3);

	return presence;
}
190
/* Read a single bit from the one-wire bus. */
static inline int
nic_read_bit(struct ioc3 *ioc3)
{
	int result;

	/* Short pulse opens a read timeslot; the sampled bus state is the bit. */
	ioc3_w(mcr, mcr_pack(6, 13));
	result = nic_wait(ioc3);
	/* Idle out the rest of the timeslot before the next operation. */
	ioc3_w(mcr, mcr_pack(0, 100));
	nic_wait(ioc3);

	return result;
}
203
/* Write a single bit to the one-wire bus: a one uses a short pulse
   (6/110), a zero a long one (80/30). */
static inline void
nic_write_bit(struct ioc3 *ioc3, int bit)
{
	if (bit)
		ioc3_w(mcr, mcr_pack(6, 110));
	else
		ioc3_w(mcr, mcr_pack(80, 30));

	nic_wait(ioc3);
}
214
215 /*
216 * Read a byte from an iButton device
217 */
218 static u32
nic_read_byte(struct ioc3 * ioc3)219 nic_read_byte(struct ioc3 *ioc3)
220 {
221 u32 result = 0;
222 int i;
223
224 for (i = 0; i < 8; i++)
225 result = (result >> 1) | (nic_read_bit(ioc3) << 7);
226
227 return result;
228 }
229
/*
 * Write a byte to an iButton device, least significant bit first.
 */
static void
nic_write_byte(struct ioc3 *ioc3, int byte)
{
	int i;

	for (i = 0; i < 8; i++, byte >>= 1)
		nic_write_bit(ioc3, byte & 1);
}
245
/* Enumerate devices on the one-wire bus.  Returns the 64-bit ROM ID of
   one device, or 0 on failure.  *last carries the last discrepancy bit
   position between calls, so repeated calls walk all attached devices. */
static u64
nic_find(struct ioc3 *ioc3, int *last)
{
	int a, b, index, disc;
	u64 address = 0;

	nic_reset(ioc3);
	/* Search ROM. */
	nic_write_byte(ioc3, 0xf0);

	/* Algorithm from ``Book of iButton Standards''.  Each ROM bit is
	   probed twice: the bit and its complement. */
	for (index = 0, disc = 0; index < 64; index++) {
		a = nic_read_bit(ioc3);
		b = nic_read_bit(ioc3);

		/* 1/1: no device is driving the bus — search failed. */
		if (a && b) {
			printk("NIC search failed (not fatal).\n");
			*last = 0;
			return 0;
		}

		/* 0/0: devices disagree at this bit — a discrepancy.
		   Choose a branch and remember where we diverged. */
		if (!a && !b) {
			if (index == *last) {
				address |= 1UL << index;
			} else if (index > *last) {
				address &= ~(1UL << index);
				disc = index;
			} else if ((address & (1UL << index)) == 0)
				disc = index;
			nic_write_bit(ioc3, address & (1UL << index));
			continue;
		} else {
			/* All remaining devices agree: the bit is a. */
			if (a)
				address |= 1UL << index;
			else
				address &= ~(1UL << index);
			nic_write_bit(ioc3, a);
			continue;
		}
	}

	*last = disc;

	return address;
}
291
nic_init(struct ioc3 * ioc3)292 static int nic_init(struct ioc3 *ioc3)
293 {
294 const char *type;
295 u8 crc;
296 u8 serial[6];
297 int save = 0, i;
298
299 type = "unknown";
300
301 while (1) {
302 u64 reg;
303 reg = nic_find(ioc3, &save);
304
305 switch (reg & 0xff) {
306 case 0x91:
307 type = "DS1981U";
308 break;
309 default:
310 if (save == 0) {
311 /* Let the caller try again. */
312 return -1;
313 }
314 continue;
315 }
316
317 nic_reset(ioc3);
318
319 /* Match ROM. */
320 nic_write_byte(ioc3, 0x55);
321 for (i = 0; i < 8; i++)
322 nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);
323
324 reg >>= 8; /* Shift out type. */
325 for (i = 0; i < 6; i++) {
326 serial[i] = reg & 0xff;
327 reg >>= 8;
328 }
329 crc = reg & 0xff;
330 break;
331 }
332
333 printk("Found %s NIC", type);
334 if (type != "unknown") {
335 printk (" registration number %02x:%02x:%02x:%02x:%02x:%02x,"
336 " CRC %02x", serial[0], serial[1], serial[2],
337 serial[3], serial[4], serial[5], crc);
338 }
339 printk(".\n");
340
341 return 0;
342 }
343
/*
 * Read the NIC (Number-In-a-Can) device used to store the MAC address on
 * SN0 / SN00 nodeboards and PCI cards.
 */
static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;
	u8 nic[14];
	int tries = 2;	/* There may be some problem with the battery?  */
	int i;

	/* NOTE(review): setting gpcr_s bit 21 presumably routes the
	   one-wire bus to the NIC — confirm against the IOC3 ASIC spec. */
	ioc3_w(gpcr_s, (1 << 21));

	while (tries--) {
		if (!nic_init(ioc3))
			break;
		udelay(500);
	}

	/* tries only ends up negative if every attempt failed. */
	if (tries < 0) {
		printk("Failed to read MAC address\n");
		return;
	}

	/* Read Memory command (0xf0) followed by a two-byte start address. */
	nic_write_byte(ioc3, 0xf0);
	nic_write_byte(ioc3, 0x00);
	nic_write_byte(ioc3, 0x00);

	/* Read 14 bytes; they arrive last-index first. */
	for (i = 13; i >= 0; i--)
		nic[i] = nic_read_byte(ioc3);

	/* The six MAC address bytes live at offsets 2..7. */
	for (i = 2; i < 8; i++)
		ip->dev->dev_addr[i - 2] = nic[i];
}
379
380 /*
 * Ok, this is hosed by design.  It's necessary to know what machine the
 * NIC is in, in order to know how to read the NIC address.  We also have
 * to know if it's a PCI card or a NIC on the node board ...
384 */
ioc3_get_eaddr(struct ioc3_private * ip)385 static void ioc3_get_eaddr(struct ioc3_private *ip)
386 {
387 int i;
388
389
390 ioc3_get_eaddr_nic(ip);
391
392 printk("Ethernet address is ");
393 for (i = 0; i < 6; i++) {
394 printk("%02x", ip->dev->dev_addr[i]);
395 if (i < 5)
396 printk(":");
397 }
398 printk(".\n");
399 }
400
401
/*
 * Caller must hold the ioc3_lock even for MII readers.  This is also
 * used to protect the transmitter side but it's low contention.
 */
static u16 mii_read(struct ioc3_private *ip, int reg)
{
	struct ioc3 *ioc3 = ip->regs;
	int phy = ip->phy;

	/* Wait for any in-flight MII transaction, trigger the read, then
	   wait again before sampling the data register. */
	while (ioc3->micr & MICR_BUSY);
	ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG;
	while (ioc3->micr & MICR_BUSY);

	return ioc3->midr_r & MIDR_DATA_MASK;
}
417
/* Write data to MII register reg of the attached PHY.  Caller must hold
   ioc3_lock. */
static void mii_write(struct ioc3_private *ip, int reg, u16 data)
{
	struct ioc3 *ioc3 = ip->regs;
	int phy = ip->phy;

	/* Wait for any in-flight transaction; writing micr without
	   MICR_READTRIG starts the write; wait for it to complete. */
	while (ioc3->micr & MICR_BUSY);
	ioc3->midr_w = data;
	ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg;
	while (ioc3->micr & MICR_BUSY);
}
428
429 static int ioc3_mii_init(struct ioc3_private *ip);
430
ioc3_get_stats(struct net_device * dev)431 static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
432 {
433 struct ioc3_private *ip = dev->priv;
434 struct ioc3 *ioc3 = ip->regs;
435
436 ip->stats.collisions += (ioc3->etcdc & ETCDC_COLLCNT_MASK);
437 return &ip->stats;
438 }
439
/* Receive path: drain every descriptor the chip has marked valid,
   hand good frames to the stack and refill the ring.  Called from the
   interrupt handler. */
static inline void
ioc3_rx(struct ioc3_private *ip)
{
	struct sk_buff *skb, *new_skb;
	struct ioc3 *ioc3 = ip->regs;
	int rx_entry, n_entry, len;
	struct ioc3_erxbuf *rxb;
	unsigned long *rxr;
	u32 w0, err;

	rxr = (unsigned long *) ip->rxr;	/* Ring base */
	rx_entry = ip->rx_ci;			/* RX consume index */
	n_entry = ip->rx_pi;

	/* The ioc3_erxbuf status header sits RX_OFFSET bytes before the
	   frame data. */
	skb = ip->rx_skbs[rx_entry];
	rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
	w0 = be32_to_cpu(rxb->w0);

	while (w0 & ERXBUF_V) {
		err = be32_to_cpu(rxb->err);	/* It's valid ... */
		if (err & ERXBUF_GOODPKT) {
			/* Byte count less the trailing 4-byte CRC. */
			len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
			skb_trim(skb, len);
			skb->protocol = eth_type_trans(skb, ip->dev);

			new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (!new_skb) {
				/* Ouch, drop packet and just recycle packet
				   to keep the ring filled.  */
				ip->stats.rx_dropped++;
				new_skb = skb;
				goto next;
			}
			netif_rx(skb);

			ip->rx_skbs[rx_entry] = NULL;	/* Poison  */

			new_skb->dev = ip->dev;

			/* Because we reserve afterwards. */
			skb_put(new_skb, (1664 + RX_OFFSET));
			rxb = (struct ioc3_erxbuf *) new_skb->data;
			skb_reserve(new_skb, RX_OFFSET);

			ip->dev->last_rx = jiffies;
			ip->stats.rx_packets++;		/* Statistics */
			ip->stats.rx_bytes += len;
		} else {
			/* The frame is invalid and the skb never
			   reached the network layer so we can just
			   recycle it.  */
			new_skb = skb;
			ip->stats.rx_errors++;
		}
		if (err & ERXBUF_CRCERR)	/* Statistics */
			ip->stats.rx_crc_errors++;
		if (err & ERXBUF_FRAMERR)
			ip->stats.rx_frame_errors++;
next:
		/* Hand the (fresh or recycled) buffer back to the chip at
		   the producer slot. */
		ip->rx_skbs[n_entry] = new_skb;
		rxr[n_entry] = cpu_to_be64((0xa5UL << 56) |
		                        ((unsigned long) rxb & TO_PHYS_MASK));
		rxb->w0 = 0;				/* Clear valid flag */
		n_entry = (n_entry + 1) & 511;		/* Update erpir */

		/* Now go on to the next ring entry.  */
		rx_entry = (rx_entry + 1) & 511;
		skb = ip->rx_skbs[rx_entry];
		rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
		w0 = be32_to_cpu(rxb->w0);
	}
	/* Publish the new producer index and re-arm the RX interrupt. */
	ioc3->erpir = (n_entry << 3) | ERPIR_ARM;
	ip->rx_pi = n_entry;
	ip->rx_ci = rx_entry;
}
515
/* Transmit completion: free every skb the chip is done with and update
   the statistics.  Called from the interrupt handler. */
static inline void
ioc3_tx(struct ioc3_private *ip)
{
	unsigned long packets, bytes;
	struct ioc3 *ioc3 = ip->regs;
	int tx_entry, o_entry;
	struct sk_buff *skb;
	u32 etcir;

	spin_lock(&ip->ioc3_lock);
	etcir = ioc3->etcir;

	/* The hardware consumer index lives in bits 7.. of ETCIR. */
	tx_entry = (etcir >> 7) & 127;
	o_entry = ip->tx_ci;
	packets = 0;
	bytes = 0;

	/* Walk from our consumer index up to the chip's. */
	while (o_entry != tx_entry) {
		packets++;
		skb = ip->tx_skbs[o_entry];
		bytes += skb->len;
		dev_kfree_skb_irq(skb);
		ip->tx_skbs[o_entry] = NULL;

		o_entry = (o_entry + 1) & 127;		/* Next */

		etcir = ioc3->etcir;			/* More pkts sent?  */
		tx_entry = (etcir >> 7) & 127;
	}

	ip->stats.tx_packets += packets;
	ip->stats.tx_bytes += bytes;
	ip->txqlen -= packets;

	/* Ring has room again, let the stack queue more packets. */
	if (ip->txqlen < 128)
		netif_wake_queue(ip->dev);

	ip->tx_ci = o_entry;
	spin_unlock(&ip->ioc3_lock);
}
556
557 /*
 * Deal with fatal IOC3 errors.  This condition might be caused by hardware
 * or software problems, so we should try to recover
560 * more gracefully if this ever happens. In theory we might be flooded
561 * with such error interrupts if something really goes wrong, so we might
562 * also consider to take the interface down.
563 */
/* Log every error flagged in eisr, then recover by reinitializing the
   chip and the PHY. */
static void
ioc3_error(struct ioc3_private *ip, u32 eisr)
{
	struct net_device *dev = ip->dev;
	unsigned char *iface = dev->name;

	if (eisr & EISR_RXOFLO)
		printk(KERN_ERR "%s: RX overflow.\n", iface);
	if (eisr & EISR_RXBUFOFLO)
		printk(KERN_ERR "%s: RX buffer overflow.\n", iface);
	if (eisr & EISR_RXMEMERR)
		printk(KERN_ERR "%s: RX PCI error.\n", iface);
	if (eisr & EISR_RXPARERR)
		printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface);
	if (eisr & EISR_TXBUFUFLO)
		printk(KERN_ERR "%s: TX buffer underflow.\n", iface);
	if (eisr & EISR_TXMEMERR)
		printk(KERN_ERR "%s: TX PCI error.\n", iface);

	/* Full restart: stop the chip, reinit rings/registers and the PHY. */
	ioc3_stop(ip);
	ioc3_init(ip);
	ioc3_mii_init(ip);

	/* Reset the transmit watchdog and reopen the queue. */
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}
590
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void ioc3_interrupt(int irq, void *_dev, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)_dev;
	struct ioc3_private *ip = dev->priv;
	struct ioc3 *ioc3 = ip->regs;
	/* The interrupt sources this handler services. */
	const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
	                    EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
	                    EISR_TXEXPLICIT | EISR_TXMEMERR;
	u32 eisr;

	eisr = ioc3->eisr & enabled;

	/* Keep servicing until no handled interrupt remains pending. */
	while (eisr) {
		/* Acknowledge the bits we are about to handle. */
		ioc3->eisr = eisr;
		ioc3->eisr;			/* Flush */

		if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
		            EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
			ioc3_error(ip, eisr);
		if (eisr & EISR_RXTIMERINT)
			ioc3_rx(ip);
		if (eisr & EISR_TXEXPLICIT)
			ioc3_tx(ip);

		eisr = ioc3->eisr & enabled;
	}
}
620
621 /*
622 * Auto negotiation. The scheme is very simple. We have a timer routine that
623 * keeps watching the auto negotiation process as it progresses. The DP83840
 * is first told to start doing its thing, we set up the time and place the
 * timer state machine in its initial state.
626 *
627 * Here the timer peeks at the DP83840 status registers at each click to see
628 * if the auto negotiation has completed, we assume here that the DP83840 PHY
629 * will time out at some point and just tell us what (didn't) happen. For
630 * complete coverage we only allow so many of the ticks at this level to run,
631 * when this has expired we print a warning message and try another strategy.
632 * This "other" strategy is to force the interface into various speed/duplex
633 * configurations and we stop when we see a link-up condition before the
634 * maximum number of "peek" ticks have occurred.
635 *
636 * Once a valid link status has been detected we configure the IOC3 to speak
637 * the most efficient protocol we could get a clean link for. The priority
638 * for link configurations, highest first is:
639 *
640 * 100 Base-T Full Duplex
641 * 100 Base-T Half Duplex
642 * 10 Base-T Full Duplex
643 * 10 Base-T Half Duplex
644 *
645 * We start a new timer now, after a successful auto negotiation status has
646 * been detected. This timer just waits for the link-up bit to get set in
647 * the BMCR of the DP83840. When this occurs we print a kernel log message
648 * describing the link type in use and the fact that it is up.
649 *
650 * If a fatal error of some sort is signalled and detected in the interrupt
651 * service routine, and the chip is reset, or the link is ifconfig'd down
652 * and then back up, this entire process repeats itself all over again.
653 */
ioc3_try_next_permutation(struct ioc3_private * ip)654 static int ioc3_try_next_permutation(struct ioc3_private *ip)
655 {
656 ip->sw_bmcr = mii_read(ip, MII_BMCR);
657
658 /* Downgrade from full to half duplex. Only possible via ethtool. */
659 if (ip->sw_bmcr & BMCR_FULLDPLX) {
660 ip->sw_bmcr &= ~BMCR_FULLDPLX;
661 mii_write(ip, MII_BMCR, ip->sw_bmcr);
662
663 return 0;
664 }
665
666 /* Downgrade from 100 to 10. */
667 if (ip->sw_bmcr & BMCR_SPEED100) {
668 ip->sw_bmcr &= ~BMCR_SPEED100;
669 mii_write(ip, MII_BMCR, ip->sw_bmcr);
670
671 return 0;
672 }
673
674 /* We've tried everything. */
675 return -1;
676 }
677
678 static void
ioc3_display_link_mode(struct ioc3_private * ip)679 ioc3_display_link_mode(struct ioc3_private *ip)
680 {
681 char *tmode = "";
682
683 ip->sw_lpa = mii_read(ip, MII_LPA);
684
685 if (ip->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
686 if (ip->sw_lpa & LPA_100FULL)
687 tmode = "100Mb/s, Full Duplex";
688 else
689 tmode = "100Mb/s, Half Duplex";
690 } else {
691 if (ip->sw_lpa & LPA_10FULL)
692 tmode = "10Mb/s, Full Duplex";
693 else
694 tmode = "10Mb/s, Half Duplex";
695 }
696
697 printk(KERN_INFO "%s: Link is up at %s.\n", ip->dev->name, tmode);
698 }
699
700 static void
ioc3_display_forced_link_mode(struct ioc3_private * ip)701 ioc3_display_forced_link_mode(struct ioc3_private *ip)
702 {
703 char *speed = "", *duplex = "";
704
705 ip->sw_bmcr = mii_read(ip, MII_BMCR);
706 if (ip->sw_bmcr & BMCR_SPEED100)
707 speed = "100Mb/s, ";
708 else
709 speed = "10Mb/s, ";
710 if (ip->sw_bmcr & BMCR_FULLDPLX)
711 duplex = "Full Duplex.\n";
712 else
713 duplex = "Half Duplex.\n";
714
715 printk(KERN_INFO "%s: Link has been forced up at %s%s", ip->dev->name,
716 speed, duplex);
717 }
718
/* Program the MAC duplex bit to match the PHY's current link mode.
   Returns 0 on success, 1 if the link partner advertised nothing usable. */
static int ioc3_set_link_modes(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;
	int full;

	/*
	 * All we care about is making sure the bigmac tx_cfg has a
	 * proper duplex setting.
	 */
	if (ip->timer_state == arbwait) {
		/* Autonegotiated: derive duplex from the link partner's
		   abilities, checking the fastest mode first. */
		ip->sw_lpa = mii_read(ip, MII_LPA);
		if (!(ip->sw_lpa & (LPA_10HALF | LPA_10FULL |
		                    LPA_100HALF | LPA_100FULL)))
			goto no_response;
		if (ip->sw_lpa & LPA_100FULL)
			full = 1;
		else if (ip->sw_lpa & LPA_100HALF)
			full = 0;
		else if (ip->sw_lpa & LPA_10FULL)
			full = 1;
		else
			full = 0;
	} else {
		/* Forcing a link mode. */
		ip->sw_bmcr = mii_read(ip, MII_BMCR);
		if (ip->sw_bmcr & BMCR_FULLDPLX)
			full = 1;
		else
			full = 0;
	}

	if (full)
		ip->emcr |= EMCR_DUPLEX;
	else
		ip->emcr &= ~EMCR_DUPLEX;

	ioc3->emcr = ip->emcr;
	ioc3->emcr;	/* Read back to flush the posted write. */

	return 0;

no_response:

	return 1;
}
764
is_lucent_phy(struct ioc3_private * ip)765 static int is_lucent_phy(struct ioc3_private *ip)
766 {
767 unsigned short mr2, mr3;
768 int ret = 0;
769
770 mr2 = mii_read(ip, MII_PHYSID1);
771 mr3 = mii_read(ip, MII_PHYSID2);
772 if ((mr2 & 0xffff) == 0x0180 && ((mr3 & 0xffff) >> 10) == 0x1d) {
773 ret = 1;
774 }
775
776 return ret;
777 }
778
/* Link state machine, run every 1.2 seconds while negotiation or forced
   link bring-up is in progress.  See the big comment above for the
   overall scheme. */
static void ioc3_timer(unsigned long data)
{
	struct ioc3_private *ip = (struct ioc3_private *) data;
	int restart_timer = 0;

	ip->timer_ticks++;
	switch (ip->timer_state) {
	case arbwait:
		/*
		 * Give up after 10 ticks (about 12 seconds at 1.2s per
		 * tick) -- much too long to keep waiting for arbitration
		 * to complete.
		 */
		if (ip->timer_ticks >= 10) {
			/* Enter force mode. */
	do_force_mode:
			ip->sw_bmcr = mii_read(ip, MII_BMCR);
			printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful,"
			       " trying force link mode\n", ip->dev->name);
			/* Start with the fastest forced mode: 100 Mb/s. */
			ip->sw_bmcr = BMCR_SPEED100;
			mii_write(ip, MII_BMCR, ip->sw_bmcr);

			if (!is_lucent_phy(ip)) {
				/*
				 * OK, seems we need do disable the transceiver
				 * for the first tick to make sure we get an
				 * accurate link state at the second tick.
				 */
				ip->sw_csconfig = mii_read(ip, MII_CSCONFIG);
				ip->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
				mii_write(ip, MII_CSCONFIG, ip->sw_csconfig);
			}
			ip->timer_state = ltrywait;
			ip->timer_ticks = 0;
			restart_timer = 1;
		} else {
			/* Anything interesting happen? */
			ip->sw_bmsr = mii_read(ip, MII_BMSR);
			if (ip->sw_bmsr & BMSR_ANEGCOMPLETE) {
				int ret;

				/* Just what we've been waiting for... */
				ret = ioc3_set_link_modes(ip);
				if (ret) {
					/* Ooops, something bad happened, go to
					 * force mode.
					 *
					 * XXX Broken hubs which don't support
					 * XXX 802.3u auto-negotiation make this
					 * XXX happen as well.
					 */
					goto do_force_mode;
				}

				/*
				 * Success, at least so far, advance our state
				 * engine.
				 */
				ip->timer_state = lupwait;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case lupwait:
		/*
		 * Auto negotiation was successful and we are awaiting a
		 * link up status.  I have decided to let this timer run
		 * forever until some sort of error is signalled, reporting
		 * a message to the user at 10 second intervals.
		 */
		ip->sw_bmsr = mii_read(ip, MII_BMSR);
		if (ip->sw_bmsr & BMSR_LSTATUS) {
			/*
			 * Wheee, it's up, display the link mode in use and put
			 * the timer to sleep.
			 */
			ioc3_display_link_mode(ip);
			ip->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (ip->timer_ticks >= 10) {
				printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
				       "not completely up.\n", ip->dev->name);
				ip->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case ltrywait:
		/*
		 * Making the timeout here too long can make it take
		 * annoyingly long to attempt all of the link mode
		 * permutations, but then again this is essentially
		 * error recovery code for the most part.
		 */
		ip->sw_bmsr = mii_read(ip, MII_BMSR);
		ip->sw_csconfig = mii_read(ip, MII_CSCONFIG);
		if (ip->timer_ticks == 1) {
			if (!is_lucent_phy(ip)) {
				/*
				 * Re-enable transceiver, we'll re-enable the
				 * transceiver next tick, then check link state
				 * on the following tick.
				 */
				ip->sw_csconfig |= CSCONFIG_TCVDISAB;
				mii_write(ip, MII_CSCONFIG, ip->sw_csconfig);
			}
			restart_timer = 1;
			break;
		}
		if (ip->timer_ticks == 2) {
			if (!is_lucent_phy(ip)) {
				ip->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
				mii_write(ip, MII_CSCONFIG, ip->sw_csconfig);
			}
			restart_timer = 1;
			break;
		}
		if (ip->sw_bmsr & BMSR_LSTATUS) {
			/* Force mode selection success. */
			ioc3_display_forced_link_mode(ip);
			ioc3_set_link_modes(ip);	/* XXX error? then what? */
			ip->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (ip->timer_ticks >= 4) { /* 6 seconds or so... */
				int ret;

				ret = ioc3_try_next_permutation(ip);
				if (ret == -1) {
					/*
					 * Aieee, tried them all, reset the
					 * chip and try all over again.
					 */
					printk(KERN_NOTICE "%s: Link down, "
					       "cable problem?\n",
					       ip->dev->name);

					ioc3_init(ip);
					return;
				}
				if (!is_lucent_phy(ip)) {
					ip->sw_csconfig = mii_read(ip,
					                           MII_CSCONFIG);
					ip->sw_csconfig |= CSCONFIG_TCVDISAB;
					mii_write(ip, MII_CSCONFIG,
					          ip->sw_csconfig);
				}
				ip->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case asleep:
	default:
		/* Can't happen ... */
		printk(KERN_ERR "%s: Aieee, link timer is asleep but we got "
		       "one anyways!\n", ip->dev->name);
		restart_timer = 0;
		ip->timer_ticks = 0;
		ip->timer_state = asleep;	/* foo on you */
		break;
	};

	/* Re-arm ourselves if there is more state machine to run. */
	if (restart_timer) {
		ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
		add_timer(&ip->ioc3_timer);
	}
}
956
/* Kick off link bring-up: either start (or restart) auto-negotiation,
   or force a specific speed/duplex, then arm the ioc3_timer state
   machine.  ep is NULL when not invoked through ethtool. */
static void
ioc3_start_auto_negotiation(struct ioc3_private *ip, struct ethtool_cmd *ep)
{
	int timeout;

	/* Read all of the registers we are interested in now. */
	ip->sw_bmsr = mii_read(ip, MII_BMSR);
	ip->sw_bmcr = mii_read(ip, MII_BMCR);
	ip->sw_physid1 = mii_read(ip, MII_PHYSID1);
	ip->sw_physid2 = mii_read(ip, MII_PHYSID2);

	/* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */

	ip->sw_advertise = mii_read(ip, MII_ADVERTISE);
	if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
		/* Advertise everything we can support. */
		if (ip->sw_bmsr & BMSR_10HALF)
			ip->sw_advertise |= ADVERTISE_10HALF;
		else
			ip->sw_advertise &= ~ADVERTISE_10HALF;

		if (ip->sw_bmsr & BMSR_10FULL)
			ip->sw_advertise |= ADVERTISE_10FULL;
		else
			ip->sw_advertise &= ~ADVERTISE_10FULL;
		if (ip->sw_bmsr & BMSR_100HALF)
			ip->sw_advertise |= ADVERTISE_100HALF;
		else
			ip->sw_advertise &= ~ADVERTISE_100HALF;
		if (ip->sw_bmsr & BMSR_100FULL)
			ip->sw_advertise |= ADVERTISE_100FULL;
		else
			ip->sw_advertise &= ~ADVERTISE_100FULL;
		mii_write(ip, MII_ADVERTISE, ip->sw_advertise);

		/*
		 * XXX Currently no IOC3 card I know off supports 100BaseT4,
		 * XXX and this is because the DP83840 does not support it,
		 * XXX changes XXX would need to be made to the tx/rx logic in
		 * XXX the driver as well so I completely skip checking for it
		 * XXX in the BMSR for now.
		 */

#ifdef AUTO_SWITCH_DEBUG
		ASD(("%s: Advertising [ ", ip->dev->name));
		if (ip->sw_advertise & ADVERTISE_10HALF)
			ASD(("10H "));
		if (ip->sw_advertise & ADVERTISE_10FULL)
			ASD(("10F "));
		if (ip->sw_advertise & ADVERTISE_100HALF)
			ASD(("100H "));
		if (ip->sw_advertise & ADVERTISE_100FULL)
			ASD(("100F "));
#endif

		/* Enable Auto-Negotiation, this is usually on already... */
		ip->sw_bmcr |= BMCR_ANENABLE;
		mii_write(ip, MII_BMCR, ip->sw_bmcr);

		/* Restart it to make sure it is going. */
		ip->sw_bmcr |= BMCR_ANRESTART;
		mii_write(ip, MII_BMCR, ip->sw_bmcr);

		/* BMCR_ANRESTART self clears when the process has begun. */

		timeout = 64;	/* More than enough. */
		while (--timeout) {
			ip->sw_bmcr = mii_read(ip, MII_BMCR);
			if (!(ip->sw_bmcr & BMCR_ANRESTART))
				break;	/* got it. */
			udelay(10);
		}
		if (!timeout) {
			printk(KERN_ERR "%s: IOC3 would not start auto "
			       "negotiation BMCR=0x%04x\n",
			       ip->dev->name, ip->sw_bmcr);
			printk(KERN_NOTICE "%s: Performing force link "
			       "detection.\n", ip->dev->name);
			goto force_link;
		} else {
			ip->timer_state = arbwait;
		}
	} else {
force_link:
		/*
		 * Force the link up, trying first a particular mode.  Either
		 * we are here at the request of ethtool or because the IOC3
		 * would not start to autoneg.
		 */

		/*
		 * Disable auto-negotiation in BMCR, enable the duplex and
		 * speed setting, init the timer state machine, and fire it off.
		 */
		if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
			/* Only reachable via the goto above: default to a
			   forced 100 Mb/s attempt. */
			ip->sw_bmcr = BMCR_SPEED100;
		} else {
			if (ep->speed == SPEED_100)
				ip->sw_bmcr = BMCR_SPEED100;
			else
				ip->sw_bmcr = 0;
			if (ep->duplex == DUPLEX_FULL)
				ip->sw_bmcr |= BMCR_FULLDPLX;
		}
		mii_write(ip, MII_BMCR, ip->sw_bmcr);

		if (!is_lucent_phy(ip)) {
			/*
			 * OK, seems we need do disable the transceiver for the
			 * first tick to make sure we get an accurate link
			 * state at the second tick.
			 */
			ip->sw_csconfig = mii_read(ip, MII_CSCONFIG);
			ip->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
			mii_write(ip, MII_CSCONFIG, ip->sw_csconfig);
		}
		ip->timer_state = ltrywait;
	}

	/* (Re)arm the 1.2 second state machine timer. */
	del_timer(&ip->ioc3_timer);
	ip->timer_ticks = 0;
	ip->ioc3_timer.expires = jiffies + (12 * HZ)/10;  /* 1.2 sec. */
	ip->ioc3_timer.data = (unsigned long) ip;
	ip->ioc3_timer.function = &ioc3_timer;
	add_timer(&ip->ioc3_timer);
}
1083
ioc3_mii_init(struct ioc3_private * ip)1084 static int ioc3_mii_init(struct ioc3_private *ip)
1085 {
1086 int i, found;
1087 u16 word;
1088
1089 found = 0;
1090 spin_lock_irq(&ip->ioc3_lock);
1091 for (i = 0; i < 32; i++) {
1092 ip->phy = i;
1093 word = mii_read(ip, 2);
1094 if ((word != 0xffff) && (word != 0x0000)) {
1095 found = 1;
1096 break; /* Found a PHY */
1097 }
1098 }
1099 if (!found) {
1100 spin_unlock_irq(&ip->ioc3_lock);
1101 return -ENODEV;
1102 }
1103
1104 ioc3_start_auto_negotiation(ip, NULL); // XXX ethtool
1105
1106 spin_unlock_irq(&ip->ioc3_lock);
1107
1108 return 0;
1109 }
1110
/* Re-prime the RX ring so the chip can be restarted: compact the
   indices and clear every valid bit. */
static inline void
ioc3_clean_rx_ring(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int i;

	/* Copy entries forward until the consumer index hits a multiple
	   of 16.  NOTE(review): the 16-alignment requirement is inferred
	   from the `i & 15` test — confirm against the IOC3 spec. */
	for (i = ip->rx_ci; i & 15; i++) {
		ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
		ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
	}
	/* Both indices wrap at 512 ring entries. */
	ip->rx_pi &= 511;
	ip->rx_ci &= 511;

	/* Clear the valid flag in every buffer handed to the chip. */
	for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) {
		struct ioc3_erxbuf *rxb;
		skb = ip->rx_skbs[i];
		rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
		rxb->w0 = 0;
	}
}
1131
1132 static inline void
ioc3_clean_tx_ring(struct ioc3_private * ip)1133 ioc3_clean_tx_ring(struct ioc3_private *ip)
1134 {
1135 struct sk_buff *skb;
1136 int i;
1137
1138 for (i=0; i < 128; i++) {
1139 skb = ip->tx_skbs[i];
1140 if (skb) {
1141 ip->tx_skbs[i] = NULL;
1142 dev_kfree_skb_any(skb);
1143 }
1144 ip->txr[i].cmd = 0;
1145 }
1146 ip->tx_pi = 0;
1147 ip->tx_ci = 0;
1148 }
1149
1150 static void
ioc3_free_rings(struct ioc3_private * ip)1151 ioc3_free_rings(struct ioc3_private *ip)
1152 {
1153 struct sk_buff *skb;
1154 int rx_entry, n_entry;
1155
1156 if (ip->txr) {
1157 ioc3_clean_tx_ring(ip);
1158 free_pages((unsigned long)ip->txr, 2);
1159 ip->txr = NULL;
1160 }
1161
1162 if (ip->rxr) {
1163 n_entry = ip->rx_ci;
1164 rx_entry = ip->rx_pi;
1165
1166 while (n_entry != rx_entry) {
1167 skb = ip->rx_skbs[n_entry];
1168 if (skb)
1169 dev_kfree_skb_any(skb);
1170
1171 n_entry = (n_entry + 1) & 511;
1172 }
1173 free_page((unsigned long)ip->rxr);
1174 ip->rxr = NULL;
1175 }
1176 }
1177
1178 static void
ioc3_alloc_rings(struct net_device * dev,struct ioc3_private * ip,struct ioc3 * ioc3)1179 ioc3_alloc_rings(struct net_device *dev, struct ioc3_private *ip,
1180 struct ioc3 *ioc3)
1181 {
1182 struct ioc3_erxbuf *rxb;
1183 unsigned long *rxr;
1184 int i;
1185
1186 if (ip->rxr == NULL) {
1187 /* Allocate and initialize rx ring. 4kb = 512 entries */
1188 ip->rxr = (unsigned long *) get_free_page(GFP_ATOMIC);
1189 rxr = (unsigned long *) ip->rxr;
1190 if (!rxr)
1191 printk("ioc3_alloc_rings(): get_free_page() failed!\n");
1192
1193 /* Now the rx buffers. The RX ring may be larger but
1194 we only allocate 16 buffers for now. Need to tune
1195 this for performance and memory later. */
1196 for (i = 0; i < RX_BUFFS; i++) {
1197 struct sk_buff *skb;
1198
1199 skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1200 if (!skb) {
1201 show_free_areas();
1202 continue;
1203 }
1204
1205 ip->rx_skbs[i] = skb;
1206 skb->dev = dev;
1207
1208 /* Because we reserve afterwards. */
1209 skb_put(skb, (1664 + RX_OFFSET));
1210 rxb = (struct ioc3_erxbuf *) skb->data;
1211 rxr[i] = cpu_to_be64((0xa5UL << 56) |
1212 ((unsigned long) rxb & TO_PHYS_MASK));
1213 skb_reserve(skb, RX_OFFSET);
1214 }
1215 ip->rx_ci = 0;
1216 ip->rx_pi = RX_BUFFS;
1217 }
1218
1219 if (ip->txr == NULL) {
1220 /* Allocate and initialize tx rings. 16kb = 128 bufs. */
1221 ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
1222 if (!ip->txr)
1223 printk("ioc3_alloc_rings(): get_free_page() failed!\n");
1224 ip->tx_pi = 0;
1225 ip->tx_ci = 0;
1226 }
1227 }
1228
/*
 * (Re)build both rings and program their base addresses and the
 * consume/produce index registers into the chip.  The 0xa5 attribute
 * in the upper address byte appears to select a cached xkphys address
 * view as used elsewhere in this driver -- TODO confirm against the
 * IOC3 spec.
 */
static void
ioc3_init_rings(struct net_device *dev, struct ioc3_private *ip,
	        struct ioc3 *ioc3)
{
	unsigned long ring;

	ioc3_free_rings(ip);
	ioc3_alloc_rings(dev, ip, ioc3);

	ioc3_clean_rx_ring(ip);
	ioc3_clean_tx_ring(ip);

	/* Now the rx ring base, consume & produce registers.  */
	ring = (0xa5UL << 56) | ((unsigned long)ip->rxr & TO_PHYS_MASK);
	ioc3->erbr_h = ring >> 32;
	ioc3->erbr_l = ring & 0xffffffff;
	ioc3->ercir = (ip->rx_ci << 3);		/* 8-byte RX entries */
	ioc3->erpir = (ip->rx_pi << 3) | ERPIR_ARM;

	ring = (0xa5UL << 56) | ((unsigned long)ip->txr & TO_PHYS_MASK);

	ip->txqlen = 0;				/* nothing queued  */

	/* Now the tx ring base, consume & produce registers.  */
	ioc3->etbr_h = ring >> 32;
	ioc3->etbr_l = ring & 0xffffffff;
	ioc3->etpir = (ip->tx_pi << 7);		/* 128-byte TX entries */
	ioc3->etcir = (ip->tx_ci << 7);
	ioc3->etcir;				/* Flush */
}
1259
/*
 * Discover the size of the SSRAM attached to the IOC3 by writing a test
 * pattern at offset 0 and its complement at offset 0x4000.  If the
 * larger configuration were present the two locations are distinct; if
 * either read-back fails, the writes aliased and only 64 KB is fitted.
 * The result (and the parity-enable bit) is cached in ip->emcr for the
 * next full ioc3_init().
 */
static inline void
ioc3_ssram_disc(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;
	volatile u32 *ssram0 = &ioc3->ssram[0x0000];
	volatile u32 *ssram1 = &ioc3->ssram[0x4000];
	unsigned int pattern = 0x5555;

	/* Assume the larger size SSRAM and enable parity checking */
	ioc3->emcr |= (EMCR_BUFSIZ | EMCR_RAMPAR);

	*ssram0 = pattern;
	*ssram1 = ~pattern & IOC3_SSRAM_DM;

	if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
	    (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
		/* set ssram size to 64 KB */
		ip->emcr = EMCR_RAMPAR;
		ioc3->emcr &= ~EMCR_BUFSIZ;
	} else {
		ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
	}
}
1283
/*
 * Bring the ethernet part of the IOC3 to a running state: reset the
 * MAC, program the station address, hash filter and ring registers,
 * then enable RX/TX DMA and unmask the interrupts we service.  Reads
 * of write-only-looking registers (emcr, eier) flush posted PCI writes.
 */
static void ioc3_init(struct ioc3_private *ip)
{
	struct net_device *dev = ip->dev;
	struct ioc3 *ioc3 = ip->regs;

	del_timer(&ip->ioc3_timer);		/* Kill if running	*/

	ioc3->emcr = EMCR_RST;			/* Reset		*/
	ioc3->emcr;				/* Flush WB		*/
	udelay(4);				/* Give it time ...	*/
	ioc3->emcr = 0;
	ioc3->emcr;

	/* Misc registers  */
	ioc3->erbar = 0;
	ioc3->etcsr = (17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21;
	ioc3->etcdc;				/* Clear on read	*/
	ioc3->ercsr = 15;			/* RX low watermark	*/
	ioc3->ertr = 0;				/* Interrupt immediately */
	/* Station address is stored byte-reversed across emar_l/emar_h. */
	ioc3->emar_h = (dev->dev_addr[5] << 8) | dev->dev_addr[4];
	ioc3->emar_l = (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
	               (dev->dev_addr[1] << 8) | dev->dev_addr[0];
	ioc3->ehar_h = ip->ehar_h;		/* multicast hash filter */
	ioc3->ehar_l = ip->ehar_l;
	ioc3->ersr = 42;			/* XXX should be random */

	ioc3_init_rings(ip->dev, ip, ioc3);

	/* Enable RX/TX engines and DMA; RX_OFFSET is in half-words. */
	ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
	            EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN;
	ioc3->emcr = ip->emcr;
	ioc3->eier = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
	             EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
	             EISR_TXEXPLICIT | EISR_TXMEMERR;
	ioc3->eier;				/* flush the write	*/
}
1320
/*
 * Quiesce the chip: disable the MAC engines and mask all interrupts.
 * The trailing read flushes the posted PCI writes.
 */
static inline void ioc3_stop(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;

	ioc3->emcr = 0;			/* Shutup */
	ioc3->eier = 0;			/* Disable interrupts */
	ioc3->eier;			/* Flush */
}
1329
1330 static int
ioc3_open(struct net_device * dev)1331 ioc3_open(struct net_device *dev)
1332 {
1333 struct ioc3_private *ip = dev->priv;
1334
1335 if (request_irq(dev->irq, ioc3_interrupt, SA_SHIRQ, ioc3_str, dev)) {
1336 printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
1337
1338 return -EAGAIN;
1339 }
1340
1341 ip->ehar_h = 0;
1342 ip->ehar_l = 0;
1343 ioc3_init(ip);
1344
1345 netif_start_queue(dev);
1346 return 0;
1347 }
1348
1349 static int
ioc3_close(struct net_device * dev)1350 ioc3_close(struct net_device *dev)
1351 {
1352 struct ioc3_private *ip = dev->priv;
1353
1354 del_timer(&ip->ioc3_timer);
1355
1356 netif_stop_queue(dev);
1357
1358 ioc3_stop(ip);
1359 free_irq(dev->irq, dev);
1360
1361 ioc3_free_rings(ip);
1362 return 0;
1363 }
1364
1365 /*
1366 * MENET cards have four IOC3 chips, which are attached to two sets of
1367 * PCI slot resources each: the primary connections are on slots
1368 * 0..3 and the secondaries are on 4..7
1369 *
1370 * All four ethernets are brought out to connectors; six serial ports
1371 * (a pair from each of the first three IOC3s) are brought out to
1372 * MiniDINs; all other subdevices are left swinging in the wind, leave
1373 * them disabled.
1374 */
ioc3_is_menet(struct pci_dev * pdev)1375 static inline int ioc3_is_menet(struct pci_dev *pdev)
1376 {
1377 struct pci_dev *dev;
1378
1379 return pdev->bus->parent == NULL
1380 && (dev = pci_find_slot(pdev->bus->number, PCI_DEVFN(0, 0)))
1381 && dev->vendor == PCI_VENDOR_ID_SGI
1382 && dev->device == PCI_DEVICE_ID_SGI_IOC3
1383 && (dev = pci_find_slot(pdev->bus->number, PCI_DEVFN(1, 0)))
1384 && dev->vendor == PCI_VENDOR_ID_SGI
1385 && dev->device == PCI_DEVICE_ID_SGI_IOC3
1386 && (dev = pci_find_slot(pdev->bus->number, PCI_DEVFN(2, 0)))
1387 && dev->vendor == PCI_VENDOR_ID_SGI
1388 && dev->device == PCI_DEVICE_ID_SGI_IOC3;
1389 }
1390
ioc3_serial_probe(struct pci_dev * pdev,struct ioc3 * ioc3)1391 static void inline ioc3_serial_probe(struct pci_dev *pdev,
1392 struct ioc3 *ioc3)
1393 {
1394 struct serial_struct req;
1395
1396 /*
1397 * We need to recognice and treat the fourth MENET serial as it
1398 * does not have an SuperIO chip attached to it, therefore attempting
1399 * to access it will result in bus errors. We call something an
1400 * MENET if PCI slot 0, 1, 2 and 3 of a master PCI bus all have an IOC3
1401 * in it. This is paranoid but we want to avoid blowing up on a
1402 * showhorn PCI box that happens to have 4 IOC3 cards in it so it's
1403 * not paranoid enough ...
1404 */
1405 if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
1406 return;
1407
1408 /* Register to interrupt zero because we share the interrupt with
1409 the serial driver which we don't properly support yet. */
1410 memset(&req, 0, sizeof(req));
1411 req.irq = 0;
1412 req.flags = IOC3_COM_FLAGS;
1413 req.io_type = SERIAL_IO_MEM;
1414 req.iomem_reg_shift = 0;
1415 req.baud_base = IOC3_BAUD;
1416
1417 req.iomem_base = (unsigned char *) &ioc3->sregs.uarta;
1418 register_serial(&req);
1419
1420 req.iomem_base = (unsigned char *) &ioc3->sregs.uartb;
1421 register_serial(&req);
1422 }
1423
ioc3_probe(struct pci_dev * pdev,const struct pci_device_id * ent)1424 static int __devinit ioc3_probe(struct pci_dev *pdev,
1425 const struct pci_device_id *ent)
1426 {
1427 struct net_device *dev = NULL;
1428 struct ioc3_private *ip;
1429 struct ioc3 *ioc3;
1430 unsigned long ioc3_base, ioc3_size;
1431 u32 vendor, model, rev;
1432 int err;
1433
1434 dev = alloc_etherdev(sizeof(struct ioc3_private));
1435 if (!dev)
1436 return -ENOMEM;
1437
1438 err = pci_request_regions(pdev, "ioc3");
1439 if (err)
1440 goto out_free;
1441
1442 SET_MODULE_OWNER(dev);
1443 ip = dev->priv;
1444 ip->dev = dev;
1445
1446 dev->irq = pdev->irq;
1447
1448 ioc3_base = pci_resource_start(pdev, 0);
1449 ioc3_size = pci_resource_len(pdev, 0);
1450 ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size);
1451 if (!ioc3) {
1452 printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n",
1453 pdev->slot_name);
1454 err = -ENOMEM;
1455 goto out_res;
1456 }
1457 ip->regs = ioc3;
1458
1459 #ifdef CONFIG_SERIAL
1460 ioc3_serial_probe(pdev, ioc3);
1461 #endif
1462
1463 spin_lock_init(&ip->ioc3_lock);
1464 init_timer(&ip->ioc3_timer);
1465
1466 ioc3_stop(ip);
1467 ioc3_init(ip);
1468
1469 ioc3_mii_init(ip);
1470
1471 if (ip->phy == -1) {
1472 printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
1473 pdev->slot_name);
1474 err = -ENODEV;
1475 goto out_stop;
1476 }
1477
1478 ioc3_ssram_disc(ip);
1479 ioc3_get_eaddr(ip);
1480
1481 /* The IOC3-specific entries in the device structure. */
1482 dev->open = ioc3_open;
1483 dev->hard_start_xmit = ioc3_start_xmit;
1484 dev->tx_timeout = ioc3_timeout;
1485 dev->watchdog_timeo = 5 * HZ;
1486 dev->stop = ioc3_close;
1487 dev->get_stats = ioc3_get_stats;
1488 dev->do_ioctl = ioc3_ioctl;
1489 dev->set_multicast_list = ioc3_set_multicast_list;
1490
1491 err = register_netdev(dev);
1492 if (err)
1493 goto out_stop;
1494
1495 vendor = (ip->sw_physid1 << 12) | (ip->sw_physid2 >> 4);
1496 model = (ip->sw_physid2 >> 4) & 0x3f;
1497 rev = ip->sw_physid2 & 0xf;
1498 printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, "
1499 "rev %d.\n", dev->name, ip->phy, vendor, model, rev);
1500 printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name,
1501 ip->emcr & EMCR_BUFSIZ ? 128 : 64);
1502
1503 return 0;
1504
1505 out_stop:
1506 ioc3_stop(ip);
1507 free_irq(dev->irq, dev);
1508 ioc3_free_rings(ip);
1509 out_res:
1510 pci_release_regions(pdev);
1511 out_free:
1512 kfree(dev);
1513 return err;
1514 }
1515
/*
 * PCI remove: unregister the interface, unmap the registers and free
 * the net_device.
 *
 * NOTE(review): pci_get_drvdata() is only non-NULL if the probe
 * routine called pci_set_drvdata() -- verify it does, or unloading
 * the module will oops here.
 */
static void __devexit ioc3_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ioc3_private *ip = dev->priv;
	struct ioc3 *ioc3 = ip->regs;

	unregister_netdev(dev);
	iounmap(ioc3);
	pci_release_regions(pdev);
	kfree(dev);
}
1527
/* Match every SGI IOC3, regardless of subsystem IDs. */
static struct pci_device_id ioc3_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);
1533
/* PCI driver glue: probe/remove entry points for the IOC3 ethernet. */
static struct pci_driver ioc3_driver = {
	.name		= "ioc3-eth",
	.id_table	= ioc3_pci_tbl,
	.probe		= ioc3_probe,
	.remove		= __devexit_p(ioc3_remove_one),
};
1540
/* Module init: register the PCI driver; probing happens per device. */
static int __init ioc3_init_module(void)
{
	return pci_module_init(&ioc3_driver);
}
1545
/* Module exit: unregister the driver; remove() runs for each device. */
static void __exit ioc3_cleanup_module(void)
{
	pci_unregister_driver(&ioc3_driver);
}
1550
1551 static int
ioc3_start_xmit(struct sk_buff * skb,struct net_device * dev)1552 ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1553 {
1554 unsigned long data;
1555 struct ioc3_private *ip = dev->priv;
1556 struct ioc3 *ioc3 = ip->regs;
1557 unsigned int len;
1558 struct ioc3_etxd *desc;
1559 int produce;
1560
1561 spin_lock_irq(&ip->ioc3_lock);
1562
1563 data = (unsigned long) skb->data;
1564 len = skb->len;
1565
1566 produce = ip->tx_pi;
1567 desc = &ip->txr[produce];
1568
1569 if (len <= 104) {
1570 /* Short packet, let's copy it directly into the ring. */
1571 memcpy(desc->data, skb->data, skb->len);
1572 if (len < ETH_ZLEN) {
1573 /* Very short packet, pad with zeros at the end. */
1574 memset(desc->data + len, 0, ETH_ZLEN - len);
1575 len = ETH_ZLEN;
1576 }
1577 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V);
1578 desc->bufcnt = cpu_to_be32(len);
1579 } else if ((data ^ (data + len)) & 0x4000) {
1580 unsigned long b2, s1, s2;
1581
1582 b2 = (data | 0x3fffUL) + 1UL;
1583 s1 = b2 - data;
1584 s2 = data + len - b2;
1585
1586 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
1587 ETXD_B1V | ETXD_B2V);
1588 desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT)
1589 | (s2 << ETXD_B2CNT_SHIFT));
1590 desc->p1 = cpu_to_be64((0xa5UL << 56) |
1591 (data & TO_PHYS_MASK));
1592 desc->p2 = cpu_to_be64((0xa5UL << 56) |
1593 (data & TO_PHYS_MASK));
1594 } else {
1595 /* Normal sized packet that doesn't cross a page boundary. */
1596 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V);
1597 desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
1598 desc->p1 = cpu_to_be64((0xa5UL << 56) |
1599 (data & TO_PHYS_MASK));
1600 }
1601
1602 BARRIER();
1603
1604 dev->trans_start = jiffies;
1605 ip->tx_skbs[produce] = skb; /* Remember skb */
1606 produce = (produce + 1) & 127;
1607 ip->tx_pi = produce;
1608 ioc3->etpir = produce << 7; /* Fire ... */
1609
1610 ip->txqlen++;
1611
1612 if (ip->txqlen > 127)
1613 netif_stop_queue(dev);
1614
1615 spin_unlock_irq(&ip->ioc3_lock);
1616
1617 return 0;
1618 }
1619
/*
 * Watchdog: the stack saw no TX completion within watchdog_timeo.
 * Recover with a full reset -- quiesce, reprogram the chip, redo the
 * PHY scan/negotiation -- then let the queue run again.
 */
static void ioc3_timeout(struct net_device *dev)
{
	struct ioc3_private *ip = dev->priv;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);

	ioc3_stop(ip);
	ioc3_init(ip);
	ioc3_mii_init(ip);

	dev->trans_start = jiffies;		/* restart the watchdog */
	netif_wake_queue(dev);
}
1633
1634 /*
1635 * Given a multicast ethernet address, this routine calculates the
1636 * address's bit index in the logical address filter mask
1637 */
1638
1639 static inline unsigned int
ioc3_hash(const unsigned char * addr)1640 ioc3_hash(const unsigned char *addr)
1641 {
1642 unsigned int temp = 0;
1643 unsigned char byte;
1644 u32 crc;
1645 int bits;
1646
1647 crc = ether_crc_le(ETH_ALEN, addr);
1648
1649 crc &= 0x3f; /* bit reverse lowest 6 bits for hash index */
1650 for (bits = 6; --bits >= 0; ) {
1651 temp <<= 1;
1652 temp |= (crc & 0x1);
1653 crc >>= 1;
1654 }
1655
1656 return temp;
1657 }
1658
1659
1660 /* We provide both the mii-tools and the ethtool ioctls. */
/* We provide both the mii-tools and the ethtool ioctls.
 *
 * mii-tool layout in ifr_data (as u16 words): data[0] = PHY address,
 * data[1] = register, data[2] = value to write, data[3] = value read.
 *
 * NOTE(review): the SIOCSMIIREG case label sits *inside* the brace
 * block opened by SIOCGMIIREG so it can reuse the phy/reg locals;
 * likewise the trailing "} else default:" hangs the default label on
 * the else statement.  Both are legal C but fragile -- preserved
 * byte-for-byte here, worth restructuring separately.
 */
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct ioc3_private *ip = dev->priv;
	struct ethtool_cmd *ep_user = (struct ethtool_cmd *) rq->ifr_data;
	u16 *data = (u16 *)&rq->ifr_data;
	struct ioc3 *ioc3 = ip->regs;
	struct ethtool_cmd ecmd;

	switch (cmd) {
	case SIOCGMIIPHY:	/* Get the address of the PHY in use.  */
		if (ip->phy == -1)
			return -ENODEV;
		data[0] = ip->phy;
		return 0;

	case SIOCGMIIREG: {	/* Read a PHY register.  */
		unsigned int phy = data[0];
		unsigned int reg = data[1];

		if (phy > 0x1f || reg > 0x1f)
			return -EINVAL;

		spin_lock_irq(&ip->ioc3_lock);
		/* Busy-wait for the MII interface, trigger the read,
		   busy-wait again, then fetch the result.  */
		while (ioc3->micr & MICR_BUSY);
		ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG;
		while (ioc3->micr & MICR_BUSY);
		data[3] = (ioc3->midr_r & MIDR_DATA_MASK);
		spin_unlock_irq(&ip->ioc3_lock);

		return 0;

	case SIOCSMIIREG:	/* Write a PHY register.  */
		phy = data[0];
		reg = data[1];

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (phy > 0x1f || reg > 0x1f)
			return -EINVAL;

		spin_lock_irq(&ip->ioc3_lock);
		/* Load the data register first, then the write command. */
		while (ioc3->micr & MICR_BUSY);
		ioc3->midr_w = data[2];
		ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg;
		while (ioc3->micr & MICR_BUSY);
		spin_unlock_irq(&ip->ioc3_lock);

		return 0;
	}
	case SIOCETHTOOL:
		if (copy_from_user(&ecmd, ep_user, sizeof(ecmd)))
			return -EFAULT;

		if (ecmd.cmd == ETHTOOL_GSET) {
			/* Report the fixed capabilities of this MAC ... */
			ecmd.supported =
				(SUPPORTED_10baseT_Half |
				 SUPPORTED_10baseT_Full |
				 SUPPORTED_100baseT_Half |
				 SUPPORTED_100baseT_Full | SUPPORTED_Autoneg |
				 SUPPORTED_TP | SUPPORTED_MII);

			ecmd.port = PORT_TP;
			ecmd.transceiver = XCVR_INTERNAL;
			ecmd.phy_address = ip->phy;

			/* Record PHY settings. */
			spin_lock_irq(&ip->ioc3_lock);
			ip->sw_bmcr = mii_read(ip, MII_BMCR);
			ip->sw_lpa = mii_read(ip, MII_LPA);
			spin_unlock_irq(&ip->ioc3_lock);
			if (ip->sw_bmcr & BMCR_ANENABLE) {
				/* Autoneg on: derive speed/duplex from the
				   link partner ability register.  */
				ecmd.autoneg = AUTONEG_ENABLE;
				ecmd.speed = (ip->sw_lpa &
				             (LPA_100HALF | LPA_100FULL)) ?
				             SPEED_100 : SPEED_10;
				if (ecmd.speed == SPEED_100)
					ecmd.duplex = (ip->sw_lpa & (LPA_100FULL)) ?
					              DUPLEX_FULL : DUPLEX_HALF;
				else
					ecmd.duplex = (ip->sw_lpa & (LPA_10FULL)) ?
					              DUPLEX_FULL : DUPLEX_HALF;
			} else {
				/* Forced mode: read it from the BMCR.  */
				ecmd.autoneg = AUTONEG_DISABLE;
				ecmd.speed = (ip->sw_bmcr & BMCR_SPEED100) ?
				             SPEED_100 : SPEED_10;
				ecmd.duplex = (ip->sw_bmcr & BMCR_FULLDPLX) ?
				              DUPLEX_FULL : DUPLEX_HALF;
			}
			if (copy_to_user(ep_user, &ecmd, sizeof(ecmd)))
				return -EFAULT;
			return 0;
		} else if (ecmd.cmd == ETHTOOL_SSET) {
			/* Verify the settings we care about. */
			if (ecmd.autoneg != AUTONEG_ENABLE &&
			    ecmd.autoneg != AUTONEG_DISABLE)
				return -EINVAL;

			if (ecmd.autoneg == AUTONEG_DISABLE &&
			    ((ecmd.speed != SPEED_100 &&
			      ecmd.speed != SPEED_10) ||
			     (ecmd.duplex != DUPLEX_HALF &&
			      ecmd.duplex != DUPLEX_FULL)))
				return -EINVAL;

			/* Ok, do it to it. */
			del_timer(&ip->ioc3_timer);
			spin_lock_irq(&ip->ioc3_lock);
			ioc3_start_auto_negotiation(ip, &ecmd);
			spin_unlock_irq(&ip->ioc3_lock);

			return 0;
		} else
	default:
		return -EOPNOTSUPP;
	}

	return -EOPNOTSUPP;
}
1780
/*
 * Rebuild the RX filter: promiscuous mode, accept-all-multicast, or a
 * 64-bit hash filter built from the multicast list via ioc3_hash().
 * The bare reads of emcr after the writes flush posted PCI writes, as
 * done elsewhere in this driver.
 */
static void ioc3_set_multicast_list(struct net_device *dev)
{
	struct dev_mc_list *dmi = dev->mc_list;
	struct ioc3_private *ip = dev->priv;
	struct ioc3 *ioc3 = ip->regs;
	u64 ehar = 0;
	int i;

	netif_stop_queue(dev);				/* Lock out others. */

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous.  */
		/* Unconditionally log net taps. */
		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
		ip->emcr |= EMCR_PROMISC;
		ioc3->emcr = ip->emcr;
		ioc3->emcr;
	} else {
		ip->emcr &= ~EMCR_PROMISC;
		ioc3->emcr = ip->emcr;			/* Clear promiscuous. */
		ioc3->emcr;

		if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
			/* Too many for hashing to make sense or we want all
			   multicast packets anyway,  so skip computing all the
			   hashes and just accept all packets.  */
			ip->ehar_h = 0xffffffff;
			ip->ehar_l = 0xffffffff;
		} else {
			for (i = 0; i < dev->mc_count; i++) {
				char *addr = dmi->dmi_addr;
				dmi = dmi->next;

				/* Skip entries that aren't multicast
				   (group bit clear).  */
				if (!(*addr & 1))
					continue;

				ehar |= (1UL << ioc3_hash(addr));
			}
			ip->ehar_h = ehar >> 32;
			ip->ehar_l = ehar & 0xffffffff;
		}
		ioc3->ehar_h = ip->ehar_h;
		ioc3->ehar_l = ip->ehar_l;
	}

	netif_wake_queue(dev);			/* Let us get going again. */
}
1827
1828 MODULE_AUTHOR("Ralf Baechle <ralf@oss.sgi.com>");
1829 MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
1830 MODULE_LICENSE("GPL");
1831
1832 module_init(ioc3_init_module);
1833 module_exit(ioc3_cleanup_module);
1834