1 /* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4
2 *
3 * (c) Copyright 1998 Red Hat Software Inc
4 * Written by Alan Cox.
5 * Further debugging by Carl Drougge.
6 * Modified by Richard Procter (rnp@netlink.co.nz)
7 *
8 * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
9 * (for the MCA stuff) written by Wim Dumon.
10 *
11 * Thanks to 3Com for making this possible by providing me with the
12 * documentation.
13 *
14 * This software may be used and distributed according to the terms
15 * of the GNU General Public License, incorporated herein by reference.
16 *
17 */
18
19 #define DRV_NAME "3c527"
20 #define DRV_VERSION "0.6a"
21 #define DRV_RELDATE "2001/11/17"
22
23 static const char *version =
24 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Proctor (rnp@netlink.co.nz)\n";
25
26 /**
27 * DOC: Traps for the unwary
28 *
29 * The diagram (Figure 1-1) and the POS summary disagree with the
30 * "Interrupt Level" section in the manual.
31 *
32 * The manual contradicts itself when describing the minimum number
33 * buffers in the 'configure lists' command.
34 * My card accepts a buffer config of 4/4.
35 *
36 * Setting the SAV BP bit does not save bad packets, but
37 * only enables RX on-card stats collection.
38 *
39 * The documentation in places seems to miss things. In actual fact
40 * I've always eventually found everything is documented, it just
41 * requires careful study.
42 *
43 * DOC: Theory Of Operation
44 *
45 * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
46 * amount of on board intelligence that housekeeps a somewhat dumber
47 * Intel NIC. For performance we want to keep the transmit queue deep
48 * as the card can transmit packets while fetching others from main
49 * memory by bus master DMA. Transmission and reception are driven by
50 * circular buffer queues.
51 *
52 * The mailboxes can be used for controlling how the card traverses
 * its buffer rings, but are used only for initial setup in this
54 * implementation. The exec mailbox allows a variety of commands to
55 * be executed. Each command must complete before the next is
56 * executed. Primarily we use the exec mailbox for controlling the
57 * multicast lists. We have to do a certain amount of interesting
58 * hoop jumping as the multicast list changes can occur in interrupt
59 * state when the card has an exec command pending. We defer such
60 * events until the command completion interrupt.
61 *
62 * A copy break scheme (taken from 3c59x.c) is employed whereby
63 * received frames exceeding a configurable length are passed
 * directly to the higher networking layers without incurring a copy,
65 * in what amounts to a time/space trade-off.
66 *
67 * The card also keeps a large amount of statistical information
68 * on-board. In a perfect world, these could be used safely at no
69 * cost. However, lacking information to the contrary, processing
70 * them without races would involve so much extra complexity as to
71 * make it unworthwhile to do so. In the end, a hybrid SW/HW
72 * implementation was made necessary --- see mc32_update_stats().
73 *
74 * DOC: Notes
75 *
76 * It should be possible to use two or more cards, but at this stage
77 * only by loading two copies of the same module.
78 *
79 * The on-board 82586 NIC has trouble receiving multiple
80 * back-to-back frames and so is likely to drop packets from fast
81 * senders.
82 **/
83
84 #include <linux/module.h>
85
86 #include <linux/kernel.h>
87 #include <linux/sched.h>
88 #include <linux/types.h>
89 #include <linux/fcntl.h>
90 #include <linux/interrupt.h>
91 #include <linux/ptrace.h>
92 #include <linux/mca.h>
93 #include <linux/ioport.h>
94 #include <linux/in.h>
95 #include <linux/slab.h>
96 #include <linux/string.h>
97 #include <linux/ethtool.h>
98
99 #include <asm/uaccess.h>
100 #include <asm/system.h>
101 #include <asm/bitops.h>
102 #include <asm/io.h>
103 #include <asm/dma.h>
104 #include <linux/errno.h>
105 #include <linux/init.h>
106
107 #include <linux/netdevice.h>
108 #include <linux/etherdevice.h>
109 #include <linux/skbuff.h>
110 #include <linux/if_ether.h>
111
112 #include "3c527.h"
113
114 MODULE_LICENSE("GPL");
115
116 /*
117 * The name of the card. Is used for messages and in the requests for
118 * io regions, irqs and dma channels
119 */
120 static const char* cardname = DRV_NAME;
121
122 /* use 0 for production, 1 for verification, >2 for debug */
123 #ifndef NET_DEBUG
124 #define NET_DEBUG 2
125 #endif
126
127 #undef DEBUG_IRQ
128
129 static unsigned int mc32_debug = NET_DEBUG;
130
131 /* The number of low I/O ports used by the ethercard. */
132 #define MC32_IO_EXTENT 8
133
134 /* As implemented, values must be a power-of-2 -- 4/8/16/32 */
135 #define TX_RING_LEN 32 /* Typically the card supports 37 */
136 #define RX_RING_LEN 8 /* " " " */
137
138 /* Copy break point, see above for details.
139 * Setting to > 1512 effectively disables this feature. */
140 #define RX_COPYBREAK 200 /* Value from 3c59x.c */
141
142 /* Issue the 82586 workaround command - this is for "busy lans", but
143 * basically means for all lans now days - has a performance (latency)
144 * cost, but best set. */
145 static const int WORKAROUND_82586=1;
146
147 /* Pointers to buffers and their on-card records */
148
/* Pairs a host sk_buff with the on-card descriptor that points at it.
 * The descriptor lives in card shared memory, hence the volatile. */
struct mc32_ring_desc
{
	volatile struct skb_header *p;	/* on-card descriptor (shared memory) */
	struct sk_buff *skb;		/* host buffer the descriptor refers to */
};
154
155
156 /* Information that needs to be kept for each board. */
/* Per-board driver state, hung off dev->priv. */
struct mc32_local
{
	struct net_device_stats net_stats;	/* software-maintained interface stats */
	int slot;				/* MCA slot the board occupies */
	volatile struct mc32_mailbox *rx_box;	/* card mailboxes in shared memory; */
	volatile struct mc32_mailbox *tx_box;	/* volatile because the card writes them */
	volatile struct mc32_mailbox *exec_box;
	volatile struct mc32_stats *stats;	/* Start of on-card statistics */
	u16 tx_chain;		/* Transmit list start offset */
	u16 rx_chain;		/* Receive list start offset */
	u16 tx_len;		/* Transmit list count */
	u16 rx_len;		/* Receive list count */

	u32 base;		/* bus address that card-relative offsets are added to */
	u16 exec_pending;	/* exec mailbox state: 0 idle, 1 issued/await reply,
				   2 reply ready, 3 issued/discard reply */
	u16 mc_reload_wait;	/* a multicast load request is pending */
	u32 mc_list_valid;	/* True when the mclist is set */
	u16 xceiver_state;	/* Current transceiver state. bitmapped */
	u16 desired_state;	/* The state we want the transceiver to be in */
	atomic_t tx_count;	/* buffers left */
	wait_queue_head_t event;	/* sleepers waiting for exec completion or halt */

	struct mc32_ring_desc tx_ring[TX_RING_LEN];	/* Host Transmit ring */
	struct mc32_ring_desc rx_ring[RX_RING_LEN];	/* Host Receive ring */

	u16 tx_ring_tail;	/* index to tx de-queue end */
	u16 tx_ring_head;	/* index to tx en-queue end */

	u16 rx_ring_tail;	/* index to rx de-queue end */
};
187
188 /* The station (ethernet) address prefix, used for a sanity check. */
189 #define SA_ADDR0 0x02
190 #define SA_ADDR1 0x60
191 #define SA_ADDR2 0xAC
192
/* One entry of the MCA probe table: POS adapter ID plus display name. */
struct mca_adapters_t {
	unsigned int id;	/* MCA POS adapter ID to match */
	char *name;		/* human-readable card name */
};
197
/* Adapters recognised by mc32_probe(); list is NULL-name terminated. */
const struct mca_adapters_t mc32_adapters[] = {
	{ 0x0041, "3COM EtherLink MC/32" },
	{ 0x8EF5, "IBM High Performance Lan Adapter" },
	{ 0x0000, NULL }
};
203
204
205 /* Macros for ring index manipulations */
/* Advance an rx ring index by one, wrapping at RX_RING_LEN (a power of 2). */
static inline u16 next_rx(u16 rx)
{
	return (u16)((rx + 1) & (RX_RING_LEN - 1));
}
/* Step an rx ring index back by one, wrapping at RX_RING_LEN (a power of 2). */
static inline u16 prev_rx(u16 rx)
{
	return (u16)((rx + RX_RING_LEN - 1) & (RX_RING_LEN - 1));
}
208
/* Advance a tx ring index by one, wrapping at TX_RING_LEN (a power of 2). */
static inline u16 next_tx(u16 tx)
{
	return (u16)((tx + 1) & (TX_RING_LEN - 1));
}
210
211
212 /* Index to functions, as function prototypes. */
213 extern int mc32_probe(struct net_device *dev);
214
215 static int mc32_probe1(struct net_device *dev, int ioaddr);
216 static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
217 static int mc32_open(struct net_device *dev);
218 static void mc32_timeout(struct net_device *dev);
219 static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev);
220 static void mc32_interrupt(int irq, void *dev_id, struct pt_regs *regs);
221 static int mc32_close(struct net_device *dev);
222 static struct net_device_stats *mc32_get_stats(struct net_device *dev);
223 static void mc32_set_multicast_list(struct net_device *dev);
224 static void mc32_reset_multicast_list(struct net_device *dev);
225 static struct ethtool_ops netdev_ethtool_ops;
226
227 /**
228 * mc32_probe - Search for supported boards
229 * @dev: device to probe
230 *
231 * Because MCA bus is a real bus and we can scan for cards we could do a
232 * single scan for all boards here. Right now we use the passed in device
233 * structure and scan for only one board. This needs fixing for modules
 * in particular.
235 */
236
mc32_probe(struct net_device * dev)237 int __init mc32_probe(struct net_device *dev)
238 {
239 static int current_mca_slot = -1;
240 int i;
241 int adapter_found = 0;
242
243 SET_MODULE_OWNER(dev);
244
245 /* Do not check any supplied i/o locations.
246 POS registers usually don't fail :) */
247
248 /* MCA cards have POS registers.
249 Autodetecting MCA cards is extremely simple.
250 Just search for the card. */
251
252 for(i = 0; (mc32_adapters[i].name != NULL) && !adapter_found; i++) {
253 current_mca_slot =
254 mca_find_unused_adapter(mc32_adapters[i].id, 0);
255
256 if((current_mca_slot != MCA_NOTFOUND) && !adapter_found) {
257 if(!mc32_probe1(dev, current_mca_slot))
258 {
259 mca_set_adapter_name(current_mca_slot,
260 mc32_adapters[i].name);
261 mca_mark_as_used(current_mca_slot);
262 return 0;
263 }
264
265 }
266 }
267 return -ENODEV;
268 }
269
270 /**
271 * mc32_probe1 - Check a given slot for a board and test the card
272 * @dev: Device structure to fill in
273 * @slot: The MCA bus slot being used by this card
274 *
275 * Decode the slot data and configure the card structures. Having done this we
276 * can reset the card and configure it. The card does a full self test cycle
277 * in firmware so we have to wait for it to return and post us either a
278 * failure case or some addresses we use to find the board internals.
279 */
280
static int __init mc32_probe1(struct net_device *dev, int slot)
{
	static unsigned version_printed;
	int i, err;
	u8 POS;
	u32 base;
	struct mc32_local *lp;
	/* I/O bases selectable via POS register 2, bits 1-3 */
	static u16 mca_io_bases[]={
		0x7280,0x7290,
		0x7680,0x7690,
		0x7A80,0x7A90,
		0x7E80,0x7E90
	};
	/* Shared-memory window bases selectable via POS register 2, bits 4-6 */
	static u32 mca_mem_bases[]={
		0x00C0000,
		0x00C4000,
		0x00C8000,
		0x00CC000,
		0x00D0000,
		0x00D4000,
		0x00D8000,
		0x00DC000
	};
	/* Self-test failure strings, indexed by (boot code - 1) */
	static char *failures[]={
		"Processor instruction",
		"Processor data bus",
		"Processor data bus",
		"Processor data bus",
		"Adapter bus",
		"ROM checksum",
		"Base RAM",
		"Extended RAM",
		"82586 internal loopback",
		"82586 initialisation failure",
		"Adapter list configuration error"
	};

	/* Time to play MCA games */

	if (mc32_debug && version_printed++ == 0)
		printk(KERN_DEBUG "%s", version);

	printk(KERN_INFO "%s: %s found in slot %d:", dev->name, cardname, slot);

	/* POS register 2, bit 0: card enable */
	POS = mca_read_stored_pos(slot, 2);

	if(!(POS&1))
	{
		printk(" disabled.\n");
		return -ENODEV;
	}

	/* Fill in the 'dev' fields from the stored POS configuration. */
	dev->base_addr = mca_io_bases[(POS>>1)&7];
	dev->mem_start = mca_mem_bases[(POS>>4)&7];

	/* POS register 4, bit 0: memory window enable */
	POS = mca_read_stored_pos(slot, 4);
	if(!(POS&1))
	{
		printk("memory window disabled.\n");
		return -ENODEV;
	}

	/* POS register 5: window size (bits 4-5) and IRQ (bits 2-3) */
	POS = mca_read_stored_pos(slot, 5);

	i=(POS>>4)&3;
	if(i==3)
	{
		printk("invalid memory window.\n");
		return -ENODEV;
	}

	/* Window size: 16K, 32K or 48K */
	i*=16384;
	i+=16384;

	dev->mem_end=dev->mem_start + i;

	/* IRQ is encoded as an offset from 9 (9..12) */
	dev->irq = ((POS>>2)&3)+9;

	if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
	{
		printk("io 0x%3lX, which is busy.\n", dev->base_addr);
		return -EBUSY;
	}

	printk("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
		dev->base_addr, dev->irq, dev->mem_start, i/1024);

	/* We ought to set the cache line size here.. */

	/*
	 *	Go PROM browsing
	 */

	printk("%s: Address ", dev->name);

	/* Retrieve and print the ethernet address: select PROM bytes
	 * 12..17 via POS 6/7 and read each back through POS 3. */
	for (i = 0; i < 6; i++)
	{
		mca_write_pos(slot, 6, i+12);
		mca_write_pos(slot, 7, 0);

		printk(" %2.2x", dev->dev_addr[i] = mca_read_pos(slot,3));
	}

	/* Restore the PROM pointer */
	mca_write_pos(slot, 6, 0);
	mca_write_pos(slot, 7, 0);

	/* POS register 4, bit 1: media selection */
	POS = mca_read_stored_pos(slot, 4);

	if(POS&2)
		printk(" : BNC port selected.\n");
	else
		printk(" : AUI port selected.\n");

	/* Hard-reset the adapter with interrupts masked off */
	POS=inb(dev->base_addr+HOST_CTRL);
	POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
	POS&=~HOST_CTRL_INTE;
	outb(POS, dev->base_addr+HOST_CTRL);
	/* Reset adapter */
	udelay(100);
	/* Reset off */
	POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
	outb(POS, dev->base_addr+HOST_CTRL);

	udelay(300);

	/*
	 *	Grab the IRQ
	 */

	i = request_irq(dev->irq, &mc32_interrupt, SA_SHIRQ, dev->name, dev);
	if (i) {
		release_region(dev->base_addr, MC32_IO_EXTENT);
		printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
		return i;
	}

	/* Initialize the device structure. */
	dev->priv = kmalloc(sizeof(struct mc32_local), GFP_KERNEL);
	if (dev->priv == NULL)
	{
		err = -ENOMEM;
		goto err_exit_irq;
	}

	memset(dev->priv, 0, sizeof(struct mc32_local));
	lp = dev->priv;
	lp->slot = slot;

	i=0;

	/* Wait for the firmware self-test to finish: the data port reads
	 * 0xFF until the card posts its boot/status byte.  Bit 5 of
	 * base+2 flags that a fresh byte is available.  Give up after
	 * roughly a second. */
	base = inb(dev->base_addr);

	while(base == 0xFF)
	{
		i++;
		if(i == 1000)
		{
			printk(KERN_ERR "%s: failed to boot adapter.\n", dev->name);
			err = -ENODEV;
			goto err_exit_free;
		}
		udelay(1000);
		if(inb(dev->base_addr+2)&(1<<5))
			base = inb(dev->base_addr);
	}

	/* Non-zero boot byte is a self-test failure code */
	if(base>0)
	{
		if(base < 0x0C)
			printk(KERN_ERR "%s: %s%s.\n", dev->name, failures[base-1],
				base<0x0A?" test failure":"");
		else
			printk(KERN_ERR "%s: unknown failure %d.\n", dev->name, base);
		err = -ENODEV;
		goto err_exit_free;
	}

	/* Assemble the 32-bit exec mailbox offset, one byte at a time
	 * (LSB first), waiting for each byte to become available. */
	base=0;
	for(i=0;i<4;i++)
	{
		int n=0;

		while(!(inb(dev->base_addr+2)&(1<<5)))
		{
			n++;
			udelay(50);
			if(n>100)
			{
				printk(KERN_ERR "%s: mailbox read fail (%d).\n", dev->name, i);
				err = -ENODEV;
				goto err_exit_free;
			}
		}

		base|=(inb(dev->base_addr)<<(8*i));
	}

	lp->exec_box=bus_to_virt(dev->mem_start+base);

	/* data[1]:data[0] hold the card's base offset for all further
	 * card-relative addresses */
	base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];

	lp->base = dev->mem_start+base;

	/* Remaining data words give the rx/tx mailbox and stats offsets */
	lp->rx_box=bus_to_virt(lp->base + lp->exec_box->data[2]);
	lp->tx_box=bus_to_virt(lp->base + lp->exec_box->data[3]);

	lp->stats = bus_to_virt(lp->base + lp->exec_box->data[5]);

	/*
	 *	Descriptor chains (card relative)
	 */

	lp->tx_chain = lp->exec_box->data[8];   /* Transmit list start offset */
	lp->rx_chain = lp->exec_box->data[10];  /* Receive list start offset */
	lp->tx_len = lp->exec_box->data[9];     /* Transmit list count */
	lp->rx_len = lp->exec_box->data[11];    /* Receive list count */

	init_waitqueue_head(&lp->event);

	printk("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
		dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);

	/* Hook up the net_device operations */
	dev->open		= mc32_open;
	dev->stop		= mc32_close;
	dev->hard_start_xmit	= mc32_send_packet;
	dev->get_stats		= mc32_get_stats;
	dev->set_multicast_list = mc32_set_multicast_list;
	dev->tx_timeout		= mc32_timeout;
	dev->watchdog_timeo	= HZ*5;	/* Board does all the work */
	dev->ethtool_ops	= &netdev_ethtool_ops;

	lp->xceiver_state = HALTED;

	lp->tx_ring_tail=lp->tx_ring_head=0;

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(dev);

	return 0;

err_exit_free:
	kfree(dev->priv);
err_exit_irq:
	free_irq(dev->irq, dev);
	release_region(dev->base_addr, MC32_IO_EXTENT);
	return err;
}
533
534
535 /**
536 * mc32_ready_poll - wait until we can feed it a command
537 * @dev: The device to wait for
538 *
539 * Wait until the card becomes ready to accept a command via the
540 * command register. This tells us nothing about the completion
541 * status of any pending commands and takes very little time at all.
542 */
543
mc32_ready_poll(struct net_device * dev)544 static void mc32_ready_poll(struct net_device *dev)
545 {
546 int ioaddr = dev->base_addr;
547 while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
548 }
549
550
551 /**
552 * mc32_command_nowait - send a command non blocking
553 * @dev: The 3c527 to issue the command to
554 * @cmd: The command word to write to the mailbox
555 * @data: A data block if the command expects one
556 * @len: Length of the data block
557 *
558 * Send a command from interrupt state. If there is a command
559 * currently being executed then we return an error of -1. It simply
560 * isn't viable to wait around as commands may be slow. Providing we
561 * get in, we busy wait for the board to become ready to accept the
562 * command and issue it. We do not wait for the command to complete
563 * --- the card will interrupt us when it's done.
564 */
565
static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
{
	struct mc32_local *lp = (struct mc32_local *)dev->priv;
	int ioaddr = dev->base_addr;

	/* Called from interrupt state: if an exec command is already in
	 * flight we cannot wait for it, so just report failure. */
	if(lp->exec_pending)
		return -1;

	/* State 3: command issued, reply to be discarded by the irq
	 * handler (see the state machine described above mc32_command) */
	lp->exec_pending=3;
	lp->exec_box->mbox=0;
	lp->exec_box->mbox=cmd;
	memcpy((void *)lp->exec_box->data, data, len);
	barrier();	/* the memcpy forgot the volatile so be sure */

	/* Send the command: wait for the command register to go ready,
	 * then strobe the exec-command bit.  Completion arrives later
	 * as an interrupt. */
	while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
	outb(1<<6, ioaddr+HOST_CMD);
	return 0;
}
585
586
587 /**
588 * mc32_command - send a command and sleep until completion
589 * @dev: The 3c527 card to issue the command to
590 * @cmd: The command word to write to the mailbox
591 * @data: A data block if the command expects one
592 * @len: Length of the data block
593 *
594 * Sends exec commands in a user context. This permits us to wait around
595 * for the replies and also to wait for the command buffer to complete
596 * from a previous command before we execute our command. After our
597 * command completes we will complete any pending multicast reload
598 * we blocked off by hogging the exec buffer.
599 *
600 * You feed the card a command, you wait, it interrupts you get a
601 * reply. All well and good. The complication arises because you use
602 * commands for filter list changes which come in at bh level from things
603 * like IPV6 group stuff.
604 *
605 * We have a simple state machine
606 *
607 * 0 - nothing issued
608 *
609 * 1 - command issued, wait reply
610 *
611 * 2 - reply waiting - reader then goes to state 0
612 *
613 * 3 - command issued, trash reply. In which case the irq
614 * takes it back to state 0
615 *
616 */
617
static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
{
	struct mc32_local *lp = (struct mc32_local *)dev->priv;
	int ioaddr = dev->base_addr;
	unsigned long flags;
	int ret = 0;

	/*
	 *	Wait for any in-flight exec command to finish.  Interrupts
	 *	are disabled around the test-and-sleep so the completion
	 *	irq cannot slip in between the check and the sleep_on().
	 */

	save_flags(flags);
	cli();

	while(lp->exec_pending)
		sleep_on(&lp->event);

	/*
	 *	Issue mine: state 1 = command issued, reply wanted.
	 */

	lp->exec_pending=1;

	restore_flags(flags);

	/* Load the mailbox: clear it first, then write the command word
	 * and copy in the parameter block. */
	lp->exec_box->mbox=0;
	lp->exec_box->mbox=cmd;
	memcpy((void *)lp->exec_box->data, data, len);
	barrier();	/* the memcpy forgot the volatile so be sure */

	/* Send the command: wait for command-register ready, then strobe
	 * the exec bit. */
	while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
	outb(1<<6, ioaddr+HOST_CMD);

	/* Sleep until the completion irq moves us to state 2 (reply
	 * waiting), then consume the reply and return to state 0. */
	save_flags(flags);
	cli();

	while(lp->exec_pending!=2)
		sleep_on(&lp->event);
	lp->exec_pending=0;
	restore_flags(flags);

	/* Bit 13 of the returned mailbox word flags command failure */
	if(lp->exec_box->mbox&(1<<13))
		ret = -1;

	/*
	 *	A multicast set got blocked - do it now
	 */

	if(lp->mc_reload_wait)
	{
		mc32_reset_multicast_list(dev);
	}

	return ret;
}
674
675
676 /**
677 * mc32_start_transceiver - tell board to restart tx/rx
678 * @dev: The 3c527 card to issue the command to
679 *
680 * This may be called from the interrupt state, where it is used
681 * to restart the rx ring if the card runs out of rx buffers.
682 *
683 * First, we check if it's ok to start the transceiver. We then show
684 * the card where to start in the rx ring and issue the
685 * commands to start reception and transmission. We don't wait
686 * around for these to complete.
687 */
688
static void mc32_start_transceiver(struct net_device *dev) {

	struct mc32_local *lp = (struct mc32_local *)dev->priv;
	int ioaddr = dev->base_addr;

	/* Ignore RX overflow on device closure */
	if (lp->desired_state==HALTED)
		return;

	/* Wait until the card will accept a command */
	mc32_ready_poll(dev);

	/* Zeroed mailboxes: the start/restart commands take their
	 * parameters from the rx/tx boxes */
	lp->tx_box->mbox=0;
	lp->rx_box->mbox=0;

	/* Give the card the offset to the post-EOL-bit RX descriptor,
	 * i.e. where reception should resume */
	lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;

	outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);

	mc32_ready_poll(dev);
	outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD);   /* card ignores this on RX restart */

	/* We are not interrupted on start completion */
	lp->xceiver_state=RUNNING;
}
714
715
716 /**
717 * mc32_halt_transceiver - tell board to stop tx/rx
718 * @dev: The 3c527 card to issue the command to
719 *
720 * We issue the commands to halt the card's transceiver. In fact,
721 * after some experimenting we now simply tell the card to
722 * suspend. When issuing aborts occasionally odd things happened.
723 *
724 * We then sleep until the card has notified us that both rx and
725 * tx have been suspended.
726 */
727
static void mc32_halt_transceiver(struct net_device *dev)
{
	struct mc32_local *lp = (struct mc32_local *)dev->priv;
	int ioaddr = dev->base_addr;
	unsigned long flags;

	/* Suspend (not abort) both directions; aborts were found to
	 * misbehave -- see the comment block above. */
	mc32_ready_poll(dev);

	lp->tx_box->mbox=0;
	lp->rx_box->mbox=0;

	outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
	mc32_ready_poll(dev);
	outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);

	/* Sleep until the interrupt handler has seen both suspend
	 * completions and set xceiver_state to HALTED.  Interrupts are
	 * disabled across the test to avoid losing the wakeup. */
	save_flags(flags);
	cli();

	while(lp->xceiver_state!=HALTED)
		sleep_on(&lp->event);

	restore_flags(flags);
}
751
752
753 /**
754 * mc32_load_rx_ring - load the ring of receive buffers
755 * @dev: 3c527 to build the ring for
756 *
 * This initialises the on-card and driver datastructures to
758 * the point where mc32_start_transceiver() can be called.
759 *
760 * The card sets up the receive ring for us. We are required to use the
761 * ring it provides although we can change the size of the ring.
762 *
763 * We allocate an sk_buff for each ring entry in turn and
 * initialise its house-keeping info. At the same time, we read
765 * each 'next' pointer in our rx_ring array. This reduces slow
766 * shared-memory reads and makes it easy to access predecessor
767 * descriptors.
768 *
769 * We then set the end-of-list bit for the last entry so that the
770 * card will know when it has run out of buffers.
771 */
772
mc32_load_rx_ring(struct net_device * dev)773 static int mc32_load_rx_ring(struct net_device *dev)
774 {
775 struct mc32_local *lp = (struct mc32_local *)dev->priv;
776 int i;
777 u16 rx_base;
778 volatile struct skb_header *p;
779
780 rx_base=lp->rx_chain;
781
782 for(i=0;i<RX_RING_LEN;i++)
783 {
784 lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
785 skb_reserve(lp->rx_ring[i].skb, 18);
786
787 if(lp->rx_ring[i].skb==NULL)
788 {
789 for(;i>=0;i--)
790 kfree_skb(lp->rx_ring[i].skb);
791 return -ENOBUFS;
792 }
793
794 p=bus_to_virt(lp->base+rx_base);
795
796 p->control=0;
797 p->data=virt_to_bus(lp->rx_ring[i].skb->data);
798 p->status=0;
799 p->length=1532;
800
801 lp->rx_ring[i].p=p;
802 rx_base=p->next;
803 }
804
805 lp->rx_ring[i-1].p->control |= CONTROL_EOL;
806
807 lp->rx_ring_tail=0;
808
809 return 0;
810 }
811
812
813 /**
814 * mc32_flush_rx_ring - free the ring of receive buffers
815 * @lp: Local data of 3c527 to flush the rx ring of
816 *
817 * Free the buffer for each ring slot. This may be called
818 * before mc32_load_rx_ring(), eg. on error in mc32_open().
819 */
820
mc32_flush_rx_ring(struct net_device * dev)821 static void mc32_flush_rx_ring(struct net_device *dev)
822 {
823 struct mc32_local *lp = (struct mc32_local *)dev->priv;
824
825 struct sk_buff *skb;
826 int i;
827
828 for(i=0; i < RX_RING_LEN; i++)
829 {
830 skb = lp->rx_ring[i].skb;
831 if (skb!=NULL) {
832 kfree_skb(skb);
833 skb=NULL;
834 }
835 lp->rx_ring[i].p=NULL;
836 }
837 }
838
839
840 /**
841 * mc32_load_tx_ring - load transmit ring
842 * @dev: The 3c527 card to issue the command to
843 *
844 * This sets up the host transmit data-structures.
845 *
 * First, we obtain from the card its current position in the tx
847 * ring, so that we will know where to begin transmitting
848 * packets.
849 *
850 * Then, we read the 'next' pointers from the on-card tx ring into
851 * our tx_ring array to reduce slow shared-mem reads. Finally, we
 * initialise the tx house keeping variables.
853 *
854 */
855
static void mc32_load_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = (struct mc32_local *)dev->priv;
	volatile struct skb_header *p;
	int i;
	u16 tx_base;

	/* The card reports its current tx position in the tx mailbox */
	tx_base=lp->tx_box->data[0];

	/* Cache each on-card descriptor pointer; following the 'next'
	 * links once here avoids slow shared-memory reads later.
	 * NOTE(review): assumes lp->tx_len <= TX_RING_LEN after the
	 * buffer reconfiguration in mc32_open() -- confirm. */
	for(i=0;i<lp->tx_len;i++)
	{
		p=bus_to_virt(lp->base+tx_base);
		lp->tx_ring[i].p=p;
		lp->tx_ring[i].skb=NULL;

		tx_base=p->next;
	}

	/* -1 so that tx_ring_head cannot "lap" tx_ring_tail, */
	/* which would be bad news for mc32_tx_ring as cur. implemented */

	atomic_set(&lp->tx_count, TX_RING_LEN-1);
	lp->tx_ring_head=lp->tx_ring_tail=0;
}
880
881
882 /**
883 * mc32_flush_tx_ring - free transmit ring
884 * @lp: Local data of 3c527 to flush the tx ring of
885 *
886 * We have to consider two cases here. We want to free the pending
887 * buffers only. If the ring buffer head is past the start then the
888 * ring segment we wish to free wraps through zero. The tx ring
889 * house-keeping variables are then reset.
890 */
891
mc32_flush_tx_ring(struct net_device * dev)892 static void mc32_flush_tx_ring(struct net_device *dev)
893 {
894 struct mc32_local *lp = (struct mc32_local *)dev->priv;
895
896 if(lp->tx_ring_tail!=lp->tx_ring_head)
897 {
898 int i;
899 if(lp->tx_ring_tail < lp->tx_ring_head)
900 {
901 for(i=lp->tx_ring_tail;i<lp->tx_ring_head;i++)
902 {
903 dev_kfree_skb(lp->tx_ring[i].skb);
904 lp->tx_ring[i].skb=NULL;
905 lp->tx_ring[i].p=NULL;
906 }
907 }
908 else
909 {
910 for(i=lp->tx_ring_tail; i<TX_RING_LEN; i++)
911 {
912 dev_kfree_skb(lp->tx_ring[i].skb);
913 lp->tx_ring[i].skb=NULL;
914 lp->tx_ring[i].p=NULL;
915 }
916 for(i=0; i<lp->tx_ring_head; i++)
917 {
918 dev_kfree_skb(lp->tx_ring[i].skb);
919 lp->tx_ring[i].skb=NULL;
920 lp->tx_ring[i].p=NULL;
921 }
922 }
923 }
924
925 atomic_set(&lp->tx_count, 0);
926 lp->tx_ring_tail=lp->tx_ring_head=0;
927 }
928
929
930 /**
931 * mc32_open - handle 'up' of card
932 * @dev: device to open
933 *
934 * The user is trying to bring the card into ready state. This requires
935 * a brief dialogue with the card. Firstly we enable interrupts and then
936 * 'indications'. Without these enabled the card doesn't bother telling
937 * us what it has done. This had me puzzled for a week.
938 *
939 * We configure the number of card descriptors, then load the network
940 * address and multicast filters. Turn on the workaround mode. This
941 * works around a bug in the 82586 - it asks the firmware to do
942 * so. It has a performance (latency) hit but is needed on busy
943 * [read most] lans. We load the ring with buffers then we kick it
944 * all off.
945 */
946
mc32_open(struct net_device * dev)947 static int mc32_open(struct net_device *dev)
948 {
949 int ioaddr = dev->base_addr;
950 struct mc32_local *lp = (struct mc32_local *)dev->priv;
951 u8 one=1;
952 u8 regs;
953 u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};
954
955 /*
956 * Interrupts enabled
957 */
958
959 regs=inb(ioaddr+HOST_CTRL);
960 regs|=HOST_CTRL_INTE;
961 outb(regs, ioaddr+HOST_CTRL);
962
963
964 /*
965 * Send the indications on command
966 */
967
968 mc32_command(dev, 4, &one, 2);
969
970 /*
971 * Poke it to make sure it's really dead.
972 */
973
974 mc32_halt_transceiver(dev);
975 mc32_flush_tx_ring(dev);
976
977 /*
978 * Ask card to set up on-card descriptors to our spec
979 */
980
981 if(mc32_command(dev, 8, descnumbuffs, 4)) {
982 printk("%s: %s rejected our buffer configuration!\n",
983 dev->name, cardname);
984 mc32_close(dev);
985 return -ENOBUFS;
986 }
987
988 /* Report new configuration */
989 mc32_command(dev, 6, NULL, 0);
990
991 lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
992 lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
993 lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
994 lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
995
996 /* Set Network Address */
997 mc32_command(dev, 1, dev->dev_addr, 6);
998
999 /* Set the filters */
1000 mc32_set_multicast_list(dev);
1001
1002 if (WORKAROUND_82586) {
1003 u16 zero_word=0;
1004 mc32_command(dev, 0x0D, &zero_word, 2); /* 82586 bug workaround on */
1005 }
1006
1007 mc32_load_tx_ring(dev);
1008
1009 if(mc32_load_rx_ring(dev))
1010 {
1011 mc32_close(dev);
1012 return -ENOBUFS;
1013 }
1014
1015 lp->desired_state = RUNNING;
1016
1017 /* And finally, set the ball rolling... */
1018 mc32_start_transceiver(dev);
1019
1020 netif_start_queue(dev);
1021
1022 return 0;
1023 }
1024
1025
1026 /**
1027 * mc32_timeout - handle a timeout from the network layer
1028 * @dev: 3c527 that timed out
1029 *
1030 * Handle a timeout on transmit from the 3c527. This normally means
1031 * bad things as the hardware handles cable timeouts and mess for
1032 * us.
1033 *
1034 */
1035
mc32_timeout(struct net_device * dev)1036 static void mc32_timeout(struct net_device *dev)
1037 {
1038 printk(KERN_WARNING "%s: transmit timed out?\n", dev->name);
1039 /* Try to restart the adaptor. */
1040 netif_wake_queue(dev);
1041 }
1042
1043
1044 /**
1045 * mc32_send_packet - queue a frame for transmit
1046 * @skb: buffer to transmit
1047 * @dev: 3c527 to send it out of
1048 *
1049 * Transmit a buffer. This normally means throwing the buffer onto
1050 * the transmit queue as the queue is quite large. If the queue is
1051 * full then we set tx_busy and return. Once the interrupt handler
1052 * gets messages telling it to reclaim transmit queue entries we will
1053 * clear tx_busy and the kernel will start calling this again.
1054 *
1055 * We use cli rather than spinlocks. Since I have no access to an SMP
1056 * MCA machine I don't plan to change it. It is probably the top
1057 * performance hit for this driver on SMP however.
1058 */
1059
mc32_send_packet(struct sk_buff * skb,struct net_device * dev)1060 static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
1061 {
1062 struct mc32_local *lp = (struct mc32_local *)dev->priv;
1063 unsigned long flags;
1064
1065 volatile struct skb_header *p, *np;
1066
1067 netif_stop_queue(dev);
1068
1069 save_flags(flags);
1070 cli();
1071
1072 if(atomic_read(&lp->tx_count)==0)
1073 {
1074 restore_flags(flags);
1075 return 1;
1076 }
1077
1078 atomic_dec(&lp->tx_count);
1079
1080 /* P is the last sending/sent buffer as a pointer */
1081 p=lp->tx_ring[lp->tx_ring_head].p;
1082
1083 lp->tx_ring_head=next_tx(lp->tx_ring_head);
1084
1085 /* NP is the buffer we will be loading */
1086 np=lp->tx_ring[lp->tx_ring_head].p;
1087
1088 if(skb->len < ETH_ZLEN)
1089 {
1090 skb = skb_padto(skb, ETH_ZLEN);
1091 if(skb == NULL)
1092 goto out;
1093 }
1094
1095 /* We will need this to flush the buffer out */
1096 lp->tx_ring[lp->tx_ring_head].skb=skb;
1097
1098 np->length = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
1099
1100 np->data = virt_to_bus(skb->data);
1101 np->status = 0;
1102 np->control = CONTROL_EOP | CONTROL_EOL;
1103 wmb();
1104
1105 p->control &= ~CONTROL_EOL; /* Clear EOL on p */
1106 out:
1107 restore_flags(flags);
1108
1109 netif_wake_queue(dev);
1110 return 0;
1111 }
1112
1113
1114 /**
1115 * mc32_update_stats - pull off the on board statistics
1116 * @dev: 3c527 to service
1117 *
1118 *
1119 * Query and reset the on-card stats. There's the small possibility
1120 * of a race here, which would result in an underestimation of
1121 * actual errors. As such, we'd prefer to keep all our stats
1122 * collection in software. As a rule, we do. However it can't be
1123 * used for rx errors and collisions as, by default, the card discards
1124 * bad rx packets.
1125 *
1126 * Setting the SAV BP in the rx filter command supposedly
1127 * stops this behaviour. However, testing shows that it only seems to
1128 * enable the collation of on-card rx statistics --- the driver
1129 * never sees an RX descriptor with an error status set.
1130 *
1131 */
1132
mc32_update_stats(struct net_device * dev)1133 static void mc32_update_stats(struct net_device *dev)
1134 {
1135 struct mc32_local *lp = (struct mc32_local *)dev->priv;
1136 volatile struct mc32_stats *st = lp->stats;
1137
1138 u32 rx_errors=0;
1139
1140 rx_errors+=lp->net_stats.rx_crc_errors +=st->rx_crc_errors;
1141 st->rx_crc_errors=0;
1142 rx_errors+=lp->net_stats.rx_fifo_errors +=st->rx_overrun_errors;
1143 st->rx_overrun_errors=0;
1144 rx_errors+=lp->net_stats.rx_frame_errors +=st->rx_alignment_errors;
1145 st->rx_alignment_errors=0;
1146 rx_errors+=lp->net_stats.rx_length_errors+=st->rx_tooshort_errors;
1147 st->rx_tooshort_errors=0;
1148 rx_errors+=lp->net_stats.rx_missed_errors+=st->rx_outofresource_errors;
1149 st->rx_outofresource_errors=0;
1150 lp->net_stats.rx_errors=rx_errors;
1151
1152 /* Number of packets which saw one collision */
1153 lp->net_stats.collisions+=st->dataC[10];
1154 st->dataC[10]=0;
1155
1156 /* Number of packets which saw 2--15 collisions */
1157 lp->net_stats.collisions+=st->dataC[11];
1158 st->dataC[11]=0;
1159 }
1160
1161
1162 /**
1163 * mc32_rx_ring - process the receive ring
1164 * @dev: 3c527 that needs its receive ring processing
1165 *
1166 *
1167 * We have received one or more indications from the card that a
1168 * receive has completed. The buffer ring thus contains dirty
1169 * entries. We walk the ring by iterating over the circular rx_ring
1170 * array, starting at the next dirty buffer (which happens to be the
1171 * one we finished up at last time around).
1172 *
1173 * For each completed packet, we will either copy it and pass it up
1174 * the stack or, if the packet is near MTU sized, we allocate
1175 * another buffer and flip the old one up the stack.
1176 *
 * We must succeed in keeping a buffer on the ring. If necessary we
1178 * will toss a received packet rather than lose a ring entry. Once
1179 * the first uncompleted descriptor is found, we move the
1180 * End-Of-List bit to include the buffers just processed.
1181 *
1182 */
1183
static void mc32_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp=dev->priv;
	volatile struct skb_header *p;
	u16 rx_ring_tail = lp->rx_ring_tail;	/* first possibly-dirty descriptor */
	u16 rx_old_tail = rx_ring_tail;

	int x=0;	/* bounds the walk to 48 descriptors per invocation */

	do
	{
		p=lp->rx_ring[rx_ring_tail].p;

		if(!(p->status & (1<<7))) {	/* Not COMPLETED */
			break;
		}
		if(p->status & (1<<6))	/* COMPLETED_OK */
		{

			u16 length=p->length;
			struct sk_buff *skb;
			struct sk_buff *newskb;

			/* Try to save time by avoiding a copy on big frames */

			if ((length > RX_COPYBREAK)
			    && ((newskb=dev_alloc_skb(1532)) != NULL))
			{
				/* Flip the filled skb up the stack and hang a
				   freshly-allocated one on the ring instead. */
				skb=lp->rx_ring[rx_ring_tail].skb;
				skb_put(skb, length);

				skb_reserve(newskb,18);	/* presumably 2 align + 16 header slack -- TODO confirm */
				lp->rx_ring[rx_ring_tail].skb=newskb;
				p->data=virt_to_bus(newskb->data);
			}
			else
			{
				/* Small frame (or no memory for a replacement):
				   copy out and keep the original ring buffer. */
				skb=dev_alloc_skb(length+2);

				if(skb==NULL) {
					/* Toss the packet rather than lose the ring entry. */
					lp->net_stats.rx_dropped++;
					goto dropped;
				}

				skb_reserve(skb,2);	/* 16-byte align the IP header */
				memcpy(skb_put(skb, length),
				       lp->rx_ring[rx_ring_tail].skb->data, length);
			}

			skb->protocol=eth_type_trans(skb,dev);
			skb->dev=dev;
			dev->last_rx = jiffies;
			lp->net_stats.rx_packets++;
			lp->net_stats.rx_bytes += length;
			netif_rx(skb);
		}

	dropped:
		/* Recycle the descriptor for the card; frames that completed
		   with an error status also land here and are discarded. */
		p->length = 1532;
		p->status = 0;

		rx_ring_tail=next_rx(rx_ring_tail);
	}
	while(x++<48);

	/* If there was actually a frame to be processed, place the EOL bit */
	/* at the descriptor prior to the one to be filled next */

	if (rx_ring_tail != rx_old_tail)
	{
		lp->rx_ring[prev_rx(rx_ring_tail)].p->control |= CONTROL_EOL;
		lp->rx_ring[prev_rx(rx_old_tail)].p->control &= ~CONTROL_EOL;

		lp->rx_ring_tail=rx_ring_tail;
	}
}
1260
1261
1262 /**
1263 * mc32_tx_ring - process completed transmits
1264 * @dev: 3c527 that needs its transmit ring processing
1265 *
1266 *
1267 * This operates in a similar fashion to mc32_rx_ring. We iterate
1268 * over the transmit ring. For each descriptor which has been
1269 * processed by the card, we free its associated buffer and note
1270 * any errors. This continues until the transmit ring is emptied
1271 * or we reach a descriptor that hasn't yet been processed by the
1272 * card.
1273 *
1274 */
1275
mc32_tx_ring(struct net_device * dev)1276 static void mc32_tx_ring(struct net_device *dev)
1277 {
1278 struct mc32_local *lp=(struct mc32_local *)dev->priv;
1279 volatile struct skb_header *np;
1280
1281 /* NB: lp->tx_count=TX_RING_LEN-1 so that tx_ring_head cannot "lap" tail here */
1282
1283 while (lp->tx_ring_tail != lp->tx_ring_head)
1284 {
1285 u16 t;
1286
1287 t=next_tx(lp->tx_ring_tail);
1288 np=lp->tx_ring[t].p;
1289
1290 if(!(np->status & (1<<7)))
1291 {
1292 /* Not COMPLETED */
1293 break;
1294 }
1295 lp->net_stats.tx_packets++;
1296 if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
1297 {
1298 lp->net_stats.tx_errors++;
1299
1300 switch(np->status&0x0F)
1301 {
1302 case 1:
1303 lp->net_stats.tx_aborted_errors++;
1304 break; /* Max collisions */
1305 case 2:
1306 lp->net_stats.tx_fifo_errors++;
1307 break;
1308 case 3:
1309 lp->net_stats.tx_carrier_errors++;
1310 break;
1311 case 4:
1312 lp->net_stats.tx_window_errors++;
1313 break; /* CTS Lost */
1314 case 5:
1315 lp->net_stats.tx_aborted_errors++;
1316 break; /* Transmit timeout */
1317 }
1318 }
1319 /* Packets are sent in order - this is
1320 basically a FIFO queue of buffers matching
1321 the card ring */
1322 lp->net_stats.tx_bytes+=lp->tx_ring[t].skb->len;
1323 dev_kfree_skb_irq(lp->tx_ring[t].skb);
1324 lp->tx_ring[t].skb=NULL;
1325 atomic_inc(&lp->tx_count);
1326 netif_wake_queue(dev);
1327
1328 lp->tx_ring_tail=t;
1329 }
1330
1331 }
1332
1333
1334 /**
1335 * mc32_interrupt - handle an interrupt from a 3c527
1336 * @irq: Interrupt number
1337 * @dev_id: 3c527 that requires servicing
1338 * @regs: Registers (unused)
1339 *
1340 *
1341 * An interrupt is raised whenever the 3c527 writes to the command
1342 * register. This register contains the message it wishes to send us
1343 * packed into a single byte field. We keep reading status entries
1344 * until we have processed all the control items, but simply count
1345 * transmit and receive reports. When all reports are in we empty the
1346 * transceiver rings as appropriate. This saves the overhead of
1347 * multiple command requests.
1348 *
1349 * Because MCA is level-triggered, we shouldn't miss indications.
1350 * Therefore, we needn't ask the card to suspend interrupts within
1351 * this handler. The card receives an implicit acknowledgment of the
1352 * current interrupt when we read the command register.
1353 *
1354 */
1355
static void mc32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	struct net_device *dev = dev_id;
	struct mc32_local *lp;
	int ioaddr, status, boguscount = 0;
	int rx_event = 0;	/* defer ring processing until all reports are in */
	int tx_event = 0;

	if (dev == NULL) {
		printk(KERN_WARNING "%s: irq %d for unknown device.\n", cardname, irq);
		return;
	}

	ioaddr = dev->base_addr;
	lp = (struct mc32_local *)dev->priv;

	/* See what's cooking */

	while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
	{
		/* Reading the command register acknowledges the interrupt.
		   The status byte packs: bits 0-2 tx report, 3-5 rx report,
		   bit 6 exec reply, bit 7 stats overflow. */
		status=inb(ioaddr+HOST_CMD);

#ifdef DEBUG_IRQ
		printk("Status TX%d RX%d EX%d OV%d BC%d\n",
		       (status&7), (status>>3)&7, (status>>6)&1,
		       (status>>7)&1, boguscount);
#endif

		switch(status&7)	/* transmit report */
		{
			case 0:
				break;
			case 6:			/* TX fail */
			case 2:			/* TX ok */
				tx_event = 1;	/* count it; reap the ring once, below */
				break;
			case 3:			/* Halt */
			case 4:			/* Abort */
				lp->xceiver_state |= TX_HALTED;
				wake_up(&lp->event);
				break;
			default:
				printk("%s: strange tx ack %d\n", dev->name, status&7);
		}
		status>>=3;
		switch(status&7)	/* receive report */
		{
			case 0:
				break;
			case 2:			/* RX */
				rx_event=1;
				break;
			case 3:			/* Halt */
			case 4:			/* Abort */
				lp->xceiver_state |= RX_HALTED;
				wake_up(&lp->event);
				break;
			case 6:
				/* Out of RX buffers stat */
				/* Must restart rx: drain the ring, then kick
				   the transceiver back into action. */
				lp->net_stats.rx_dropped++;
				mc32_rx_ring(dev);
				mc32_start_transceiver(dev);
				break;
			default:
				printk("%s: strange rx ack %d\n",
					dev->name, status&7);
		}
		status>>=3;
		if(status&1)	/* exec command completed */
		{

			/* 0=no 1=yes 2=replied, get cmd, 3 = wait reply & dump it */

			if(lp->exec_pending!=3) {
				lp->exec_pending=2;	/* reply ready for the waiter */
				wake_up(&lp->event);
			}
			else
			{
				/* Waiter asked us to discard the reply. */
				lp->exec_pending=0;

				/* A new multicast set may have been
				   blocked while the old one was
				   running. If so, do it now. */

				if (lp->mc_reload_wait)
					mc32_reset_multicast_list(dev);
				else
					wake_up(&lp->event);
			}
		}
		if(status&2)	/* statistics counter near overflow */
		{
			/*
			 * We get interrupted once per
			 * counter that is about to overflow.
			 */

			mc32_update_stats(dev);
		}
	}


	/*
	 * Process the transmit and receive rings
	 */

	if(tx_event)
		mc32_tx_ring(dev);

	if(rx_event)
		mc32_rx_ring(dev);

	return;
}
1472
1473
1474 /**
1475 * mc32_close - user configuring the 3c527 down
1476 * @dev: 3c527 card to shut down
1477 *
1478 * The 3c527 is a bus mastering device. We must be careful how we
1479 * shut it down. It may also be running shared interrupt so we have
1480 * to be sure to silence it properly
1481 *
1482 * We indicate that the card is closing to the rest of the
1483 * driver. Otherwise, it is possible that the card may run out
1484 * of receive buffers and restart the transceiver while we're
1485 * trying to close it.
1486 *
1487 * We abort any receive and transmits going on and then wait until
1488 * any pending exec commands have completed in other code threads.
1489 * In theory we can't get here while that is true, in practice I am
1490 * paranoid
1491 *
1492 * We turn off the interrupt enable for the board to be sure it can't
 * interfere with other devices.
1494 */
1495
static int mc32_close(struct net_device *dev)
{
	struct mc32_local *lp = (struct mc32_local *)dev->priv;

	int ioaddr = dev->base_addr;
	u8 regs;
	u16 one=1;

	/* Flag the card as closing so other paths (e.g. the out-of-rx-buffers
	   restart in the interrupt handler) don't bring it back up. */
	lp->desired_state = HALTED;
	netif_stop_queue(dev);

	/*
	 *	Send the indications on command (handy debug check)
	 */

	mc32_command(dev, 4, &one, 2);

	/* Shut down the transceiver */

	mc32_halt_transceiver(dev);

	/* Catch any waiting commands */

	while(lp->exec_pending==1)
		sleep_on(&lp->event);

	/* Ok the card is now stopping */

	/* Mask the board's interrupt enable so it cannot disturb other
	   devices on a shared line once we are closed. */
	regs=inb(ioaddr+HOST_CTRL);
	regs&=~HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	/* Free any skbs still held by the rings. */
	mc32_flush_rx_ring(dev);
	mc32_flush_tx_ring(dev);

	/* Pull the final on-card statistics before the card goes quiet. */
	mc32_update_stats(dev);

	return 0;
}
1535
1536
1537 /**
1538 * mc32_get_stats - hand back stats to network layer
1539 * @dev: The 3c527 card to handle
1540 *
1541 * We've collected all the stats we can in software already. Now
1542 * it's time to update those kept on-card and return the lot.
1543 *
1544 */
1545
mc32_get_stats(struct net_device * dev)1546 static struct net_device_stats *mc32_get_stats(struct net_device *dev)
1547 {
1548 struct mc32_local *lp;
1549
1550 mc32_update_stats(dev);
1551
1552 lp = (struct mc32_local *)dev->priv;
1553
1554 return &lp->net_stats;
1555 }
1556
1557
1558 /**
1559 * do_mc32_set_multicast_list - attempt to update multicasts
1560 * @dev: 3c527 device to load the list on
1561 * @retry: indicates this is not the first call.
1562 *
1563 *
1564 * Actually set or clear the multicast filter for this adaptor. The
1565 * locking issues are handled by this routine. We have to track
1566 * state as it may take multiple calls to get the command sequence
1567 * completed. We just keep trying to schedule the loads until we
1568 * manage to process them all.
1569 *
1570 * num_addrs == -1 Promiscuous mode, receive all packets
1571 *
1572 * num_addrs == 0 Normal mode, clear multicast list
1573 *
1574 * num_addrs > 0 Multicast mode, receive normal and MC packets,
1575 * and do best-effort filtering.
1576 *
1577 * See mc32_update_stats() regards setting the SAV BP bit.
1578 *
1579 */
1580
static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
	struct mc32_local *lp = (struct mc32_local *)dev->priv;
	u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */

	if (dev->flags&IFF_PROMISC)
		/* Enable promiscuous mode */
		filt |= 1;
	else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > 10)
	{
		/* Too many addresses to filter in hardware: fall back to
		   promiscuous mode and record that in dev->flags. */
		dev->flags|=IFF_PROMISC;
		filt |= 1;
	}
	else if(dev->mc_count)
	{
		/* 2-byte count + up to 10 six-byte addresses = 62 bytes max */
		unsigned char block[62];
		unsigned char *bp;
		struct dev_mc_list *dmc=dev->mc_list;

		int i;

		/* A fresh call (retry==0) invalidates any list the card
		   already holds; a retry reuses it if the load succeeded. */
		if(retry==0)
			lp->mc_list_valid = 0;
		if(!lp->mc_list_valid)
		{
			/* block[0..1]: 16-bit address count, low byte first
			   (presumably the card's expected layout -- TODO confirm
			   against the 3c527 command documentation). */
			block[1]=0;
			block[0]=dev->mc_count;
			bp=block+2;

			for(i=0;i<dev->mc_count;i++)
			{
				memcpy(bp, dmc->dmi_addr, 6);
				bp+=6;
				dmc=dmc->next;
			}
			/* Card busy: flag a reload so the interrupt handler
			   calls us again when the current command finishes. */
			if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
			{
				lp->mc_reload_wait = 1;
				return;
			}
			lp->mc_list_valid=1;
		}
	}

	/* Finally program the receive filter (promisc/SAV BP bits). */
	if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
	{
		lp->mc_reload_wait = 1;
	}
	else {
		lp->mc_reload_wait = 0;
	}
}
1633
1634
1635 /**
1636 * mc32_set_multicast_list - queue multicast list update
1637 * @dev: The 3c527 to use
1638 *
1639 * Commence loading the multicast list. This is called when the kernel
1640 * changes the lists. It will override any pending list we are trying to
1641 * load.
1642 */
1643
static void mc32_set_multicast_list(struct net_device *dev)
{
	/* First attempt: retry flag clear, so the address list is rebuilt. */
	do_mc32_set_multicast_list(dev, 0);
}
1648
1649
1650 /**
1651 * mc32_reset_multicast_list - reset multicast list
1652 * @dev: The 3c527 to use
1653 *
1654 * Attempt the next step in loading the multicast lists. If this attempt
1655 * fails to complete then it will be scheduled and this function called
1656 * again later from elsewhere.
1657 */
1658
static void mc32_reset_multicast_list(struct net_device *dev)
{
	/* Retry a previously deferred load: retry flag set, so a list the
	   card already accepted is not rebuilt. */
	do_mc32_set_multicast_list(dev, 1);
}
1663
netdev_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)1664 static void netdev_get_drvinfo(struct net_device *dev,
1665 struct ethtool_drvinfo *info)
1666 {
1667 strcpy(info->driver, DRV_NAME);
1668 strcpy(info->version, DRV_VERSION);
1669 sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
1670 }
1671
netdev_get_msglevel(struct net_device * dev)1672 static u32 netdev_get_msglevel(struct net_device *dev)
1673 {
1674 return mc32_debug;
1675 }
1676
static void netdev_set_msglevel(struct net_device *dev, u32 level)
{
	/* Driver-wide setting; applies to every 3c527 in the machine. */
	mc32_debug = level;
}
1681
/* Minimal ethtool support: driver identification and debug level only. */
static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};
1687
1688 #ifdef MODULE
1689
1690 static struct net_device this_device;
1691
1692 /**
1693 * init_module - entry point
1694 *
1695 * Probe and locate a 3c527 card. This really should probe and locate
1696 * all the 3c527 cards in the machine not just one of them. Yes you can
1697 * insmod multiple modules for now but it's a hack.
1698 */
1699
init_module(void)1700 int init_module(void)
1701 {
1702 int result;
1703
1704 this_device.init = mc32_probe;
1705 if ((result = register_netdev(&this_device)) != 0)
1706 return result;
1707
1708 return 0;
1709 }
1710
1711 /**
1712 * cleanup_module - free resources for an unload
1713 *
1714 * Unloading time. We release the MCA bus resources and the interrupt
1715 * at which point everything is ready to unload. The card must be stopped
1716 * at this point or we would not have been called. When we unload we
1717 * leave the card stopped but not totally shut down. When the card is
1718 * initialized it must be rebooted or the rings reloaded before any
1719 * transmit operations are allowed to start scribbling into memory.
1720 */
1721
cleanup_module(void)1722 void cleanup_module(void)
1723 {
1724 int slot;
1725
1726 /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
1727 unregister_netdev(&this_device);
1728
1729 /*
1730 * If we don't do this, we can't re-insmod it later.
1731 */
1732
1733 if (this_device.priv)
1734 {
1735 struct mc32_local *lp=this_device.priv;
1736 slot = lp->slot;
1737 mca_mark_as_unused(slot);
1738 mca_set_adapter_name(slot, NULL);
1739 kfree(this_device.priv);
1740 }
1741 free_irq(this_device.irq, &this_device);
1742 release_region(this_device.base_addr, MC32_IO_EXTENT);
1743 }
1744
1745 #endif /* MODULE */
1746