1 /*
2 * olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
3 * 1999/2000 Mike Phillips (mikep@linuxtr.net)
4 *
5 * Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
6 * chipset.
7 *
8 * Base Driver Skeleton:
9 * Written 1993-94 by Donald Becker.
10 *
11 * Copyright 1993 United States Government as represented by the
12 * Director, National Security Agency.
13 *
14 * Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
 * assistance and perseverance with the testing of this driver.
16 *
17 * This software may be used and distributed according to the terms
18 * of the GNU General Public License, incorporated herein by reference.
19 *
20 * 4/27/99 - Alpha Release 0.1.0
21 * First release to the public
22 *
23 * 6/8/99 - Official Release 0.2.0
24 * Merged into the kernel code
25 * 8/18/99 - Updated driver for 2.3.13 kernel to use new pci
26 * resource. Driver also reports the card name returned by
27 * the pci resource.
28 * 1/11/00 - Added spinlocks for smp
29 * 2/23/00 - Updated to dev_kfree_irq
30 * 3/10/00 - Fixed FDX enable which triggered other bugs also
31 * squashed.
32 * 5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
33 * The odd thing about the changes is that the fix for
34 * endian issues with the big-endian data in the arb, asb...
35 * was to always swab() the bytes, no matter what CPU.
36 * That's because the read[wl]() functions always swap the
37 * bytes on the way in on PPC.
38 * Fixing the hardware descriptors was another matter,
39 * because they weren't going through read[wl](), there all
40 * the results had to be in memory in le32 values. kdaaker
41 *
42 * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
43 *
44 * 03/09/01 - Add new pci api, dev_base_lock, general clean up.
45 *
46 * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
47 * Change proc_fs behaviour, now one entry per adapter.
48 *
49 * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
50 * adapter when live does not take the system down with it.
51 *
52 * 06/02/01 - Clean up, copy skb for small packets
53 *
54 * 06/22/01 - Add EISR error handling routines
55 *
56 * 07/19/01 - Improve bad LAA reporting, strip out freemem
57 * into a separate function, its called from 3
58 * different places now.
59 * 02/09/02 - Replaced sleep_on.
60 * 03/01/02 - Replace access to several registers from 32 bit to
61 * 16 bit. Fixes alignment errors on PPC 64 bit machines.
62 * Thanks to Al Trautman for this one.
63 * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
64 * silently ignored until the error checking code
65 * went into version 1.0.0
66 * 06/04/02 - Add correct start up sequence for the cardbus adapters.
67 * Required for strict compliance with pci power mgmt specs.
68 * To Do:
69 *
70 * Wake on lan
71 *
72 * If Problems do Occur
73 * Most problems can be rectified by either closing and opening the interface
74 * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
75 * if compiled into the kernel).
76 */
77
78 /* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */
79
80 #define OLYMPIC_DEBUG 0
81
82
83 #include <linux/config.h>
84 #include <linux/module.h>
85
86 #include <linux/kernel.h>
87 #include <linux/sched.h>
88 #include <linux/errno.h>
89 #include <linux/timer.h>
90 #include <linux/in.h>
91 #include <linux/ioport.h>
92 #include <linux/string.h>
93 #include <linux/proc_fs.h>
94 #include <linux/ptrace.h>
95 #include <linux/skbuff.h>
96 #include <linux/interrupt.h>
97 #include <linux/delay.h>
98 #include <linux/netdevice.h>
99 #include <linux/trdevice.h>
100 #include <linux/stddef.h>
101 #include <linux/init.h>
102 #include <linux/pci.h>
103 #include <linux/spinlock.h>
104 #include <net/checksum.h>
105
106 #include <asm/io.h>
107 #include <asm/system.h>
108 #include <asm/bitops.h>
109
110 #include "olympic.h"
111
112 /* I've got to put some intelligence into the version number so that Peter and I know
113 * which version of the code somebody has got.
114 * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
115 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
116 *
117 * Official releases will only have an a.b.c version number format.
118 */
119
/* Driver version banner; printed once per adapter by olympic_init(). */
static char version[] __devinitdata =
"Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;
122
/* Major open-failure phase names, indexed by the high nibble of SRB open
 * response byte 7 (see olympic_open).  Final entry is the catch-all.
 */
static char *open_maj_error[] = {
	"No error",
	"Lobe Media Test",
	"Physical Insertion",
	"Address Verification",
	"Neighbor Notification (Ring Poll)",
	"Request Parameters",
	"FDX Registration Request",
	"FDX Duplicate Address Check",
	"Station registration Query Wait",
	"Unknown stage"
};
128
/* Minor open-failure reason names, indexed by the low nibble of SRB open
 * response byte 7 (see olympic_open).
 * Fixed user-visible typo: "failer" -> "failure" in entry 14.
 */
static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
				"Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
				"Duplicate Node Address","Request Parameters","Remove Received",
				"Reserved", "Reserved", "No Monitor Detected for RPL",
				"Monitor Contention failure for RPL", "FDX Protocol Error"};
134
/* Module parameters */

MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;

/* Ring Speed 0,4,16,100
 * 0 = Autosense
 * 4,16 = Selected speed only, no autosense
 * This allows the card to be the first on the ring
 * and become the active monitor.
 * 100 = Nothing at present, 100mbps is autodetected
 * if FDX is turned on. May be implemented in the future to
 * fail if 100mbps is not detected.
 *
 * WARNING: Some hubs will allow you to insert
 * at the wrong speed
 */

/* Per-adapter requested ring speed; 0 selects autosense (see olympic_init). */
static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
MODULE_PARM(ringspeed, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i");

/* Packet buffer size */

/* Per-adapter packet buffer size; values outside 100..18000 fall back to
 * PKT_BUF_SZ in olympic_probe. */
static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
MODULE_PARM(pkt_buf_sz, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i") ;

/* Message Level */

/* Per-adapter verbosity: non-zero enables the extra KERN_INFO messages. */
static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
MODULE_PARM(message_level, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i") ;

/* Change network_monitor to receive mac frames through the arb channel.
 * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
 * device, i.e. tr0, tr1 etc.
 * Intended to be used to create a ring-error reporting network module
 * i.e. it will give you the source address of beaconers on the ring
 */
static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
MODULE_PARM(network_monitor, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i");

/* PCI IDs this driver claims; list is terminated by the all-zero entry. */
static struct pci_device_id olympic_pci_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
	{ } 	/* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
180
181
/* Forward declarations: net_device method implementations and the
 * driver-internal helpers (SRB/ARB/ASB bottom halves, proc reporting). */
static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static int olympic_init(struct net_device *dev);
static int olympic_open(struct net_device *dev);
static int olympic_xmit(struct sk_buff *skb, struct net_device *dev);
static int olympic_close(struct net_device *dev);
static void olympic_set_rx_mode(struct net_device *dev);
static void olympic_freemem(struct net_device *dev) ;
static void olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static struct net_device_stats * olympic_get_stats(struct net_device *dev);
static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
static void olympic_arb_cmd(struct net_device *dev);
static int olympic_change_mtu(struct net_device *dev, int mtu);
static void olympic_srb_bh(struct net_device *dev) ;
static void olympic_asb_bh(struct net_device *dev) ;
static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
197
olympic_probe(struct pci_dev * pdev,const struct pci_device_id * ent)198 static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
199 {
200 struct net_device *dev ;
201 struct olympic_private *olympic_priv;
202 static int card_no = -1 ;
203 int i ;
204
205 card_no++ ;
206
207 if ((i = pci_enable_device(pdev))) {
208 return i ;
209 }
210
211 pci_set_master(pdev);
212
213 if ((i = pci_request_regions(pdev,"olympic"))) {
214 return i ;
215 } ;
216
217 dev = alloc_trdev(sizeof(struct olympic_private)) ;
218
219 if (!dev) {
220 pci_release_regions(pdev) ;
221 return -ENOMEM ;
222 }
223
224 olympic_priv = dev->priv ;
225
226 init_waitqueue_head(&olympic_priv->srb_wait);
227 init_waitqueue_head(&olympic_priv->trb_wait);
228 #if OLYMPIC_DEBUG
229 printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, dev->priv);
230 #endif
231 dev->irq=pdev->irq;
232 dev->base_addr=pci_resource_start(pdev, 0);
233 dev->init=NULL; /* Must be NULL otherwise we get called twice */
234 olympic_priv->olympic_card_name = (char *)pdev->name ;
235 olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
236 olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
237 olympic_priv->pdev = pdev ;
238
239 if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
240 olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
241 else
242 olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
243
244 dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
245 olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
246 olympic_priv->olympic_message_level = message_level[card_no] ;
247 olympic_priv->olympic_network_monitor = network_monitor[card_no];
248
249 if((i = olympic_init(dev))) {
250 iounmap(olympic_priv->olympic_mmio) ;
251 iounmap(olympic_priv->olympic_lap) ;
252 kfree(dev) ;
253 pci_release_regions(pdev) ;
254 return i ;
255 }
256
257 dev->open=&olympic_open;
258 dev->hard_start_xmit=&olympic_xmit;
259 dev->change_mtu=&olympic_change_mtu;
260 dev->stop=&olympic_close;
261 dev->do_ioctl=NULL;
262 dev->set_multicast_list=&olympic_set_rx_mode;
263 dev->get_stats=&olympic_get_stats ;
264 dev->set_mac_address=&olympic_set_mac_address ;
265 SET_MODULE_OWNER(dev) ;
266
267 pci_set_drvdata(pdev,dev) ;
268 register_netdev(dev) ;
269 printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
270 if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
271 char proc_name[20] ;
272 strcpy(proc_name,"net/olympic_") ;
273 strcat(proc_name,dev->name) ;
274 create_proc_read_entry(proc_name,0,0,olympic_proc_info,(void *)dev) ;
275 printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
276 }
277 return 0 ;
278 }
279
olympic_init(struct net_device * dev)280 static int __devinit olympic_init(struct net_device *dev)
281 {
282 struct olympic_private *olympic_priv;
283 u8 *olympic_mmio, *init_srb,*adapter_addr;
284 unsigned long t;
285 unsigned int uaa_addr;
286
287 olympic_priv=(struct olympic_private *)dev->priv;
288 olympic_mmio=olympic_priv->olympic_mmio;
289
290 printk("%s \n", version);
291 printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
292
293 writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
294 t=jiffies;
295 while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
296 schedule();
297 if(jiffies-t > 40*HZ) {
298 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
299 return -ENODEV;
300 }
301 }
302
303 spin_lock_init(&olympic_priv->olympic_lock) ;
304
305 /* Needed for cardbus */
306 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
307 writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
308 }
309
310 #if OLYMPIC_DEBUG
311 printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
312 printk("GPR: %x\n",readw(olympic_mmio+GPR));
313 printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
314 #endif
315 /* Aaaahhh, You have got to be real careful setting GPR, the card
316 holds the previous values from flash memory, including autosense
317 and ring speed */
318
319 writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);
320
321 if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */
322 writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
323 if (olympic_priv->olympic_message_level)
324 printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
325 } else if (olympic_priv->olympic_ring_speed == 16) {
326 if (olympic_priv->olympic_message_level)
327 printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
328 writew(GPR_16MBPS, olympic_mmio+GPR);
329 } else if (olympic_priv->olympic_ring_speed == 4) {
330 if (olympic_priv->olympic_message_level)
331 printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
332 writew(0, olympic_mmio+GPR);
333 }
334
335 writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);
336
337 #if OLYMPIC_DEBUG
338 printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
339 #endif
340 /* Solo has been paused to meet the Cardbus power
341 * specs if the adapter is cardbus. Check to
342 * see its been paused and then restart solo. The
343 * adapter should set the pause bit within 1 second.
344 */
345
346 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
347 t=jiffies;
348 while (!readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE) {
349 schedule() ;
350 if(jiffies-t > 2*HZ) {
351 printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ;
352 return -ENODEV;
353 }
354 }
355 writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
356 }
357
358 /* start solo init */
359 writel((1<<15),olympic_mmio+SISR_MASK_SUM);
360
361 t=jiffies;
362 while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
363 schedule();
364 if(jiffies-t > 15*HZ) {
365 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
366 return -ENODEV;
367 }
368 }
369
370 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
371
372 #if OLYMPIC_DEBUG
373 printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
374 #endif
375
376 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
377
378 #if OLYMPIC_DEBUG
379 {
380 int i;
381 printk("init_srb(%p): ",init_srb);
382 for(i=0;i<20;i++)
383 printk("%x ",readb(init_srb+i));
384 printk("\n");
385 }
386 #endif
387 if(readw(init_srb+6)) {
388 printk(KERN_INFO "tokenring card intialization failed. errorcode : %x\n",readw(init_srb+6));
389 return -ENODEV;
390 }
391
392 if (olympic_priv->olympic_message_level) {
393 if ( readb(init_srb +2) & 0x40) {
394 printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
395 } else {
396 printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
397 }
398 }
399
400 uaa_addr=swab16(readw(init_srb+8));
401
402 #if OLYMPIC_DEBUG
403 printk("UAA resides at %x\n",uaa_addr);
404 #endif
405
406 writel(uaa_addr,olympic_mmio+LAPA);
407 adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));
408
409 #if OLYMPIC_DEBUG
410 printk("adapter address: %02x:%02x:%02x:%02x:%02x:%02x\n",
411 readb(adapter_addr), readb(adapter_addr+1),readb(adapter_addr+2),
412 readb(adapter_addr+3),readb(adapter_addr+4),readb(adapter_addr+5));
413 #endif
414
415 memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
416
417 olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
418 olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));
419
420 return 0;
421
422 }
423
olympic_open(struct net_device * dev)424 static int olympic_open(struct net_device *dev)
425 {
426 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
427 u8 *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
428 unsigned long flags, t;
429 char open_error[255] ;
430 int i, open_finished = 1 ;
431
432 DECLARE_WAITQUEUE(wait,current) ;
433
434 if(request_irq(dev->irq, &olympic_interrupt, SA_SHIRQ , "olympic", dev)) {
435 return -EAGAIN;
436 }
437
438 #if OLYMPIC_DEBUG
439 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
440 printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
441 #endif
442
443 writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
444
445 writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
446
447 writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */
448
449 /* adapter is closed, so SRB is pointed to by LAPWWO */
450
451 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
452 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
453
454 #if OLYMPIC_DEBUG
455 printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
456 printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
457 printk("Before the open command \n");
458 #endif
459 do {
460 memset_io(init_srb,0,SRB_COMMAND_SIZE);
461
462 writeb(SRB_OPEN_ADAPTER,init_srb) ; /* open */
463 writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);
464
465 /* If Network Monitor, instruct card to copy MAC frames through the ARB */
466 if (olympic_priv->olympic_network_monitor)
467 writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
468 else
469 writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);
470
471 /* Test OR of first 3 bytes as its totally possible for
472 * someone to set the first 2 bytes to be zero, although this
473 * is an error, the first byte must have bit 6 set to 1 */
474
475 if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
476 writeb(olympic_priv->olympic_laa[0],init_srb+12);
477 writeb(olympic_priv->olympic_laa[1],init_srb+13);
478 writeb(olympic_priv->olympic_laa[2],init_srb+14);
479 writeb(olympic_priv->olympic_laa[3],init_srb+15);
480 writeb(olympic_priv->olympic_laa[4],init_srb+16);
481 writeb(olympic_priv->olympic_laa[5],init_srb+17);
482 memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
483 }
484 writeb(1,init_srb+30);
485
486 spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
487 olympic_priv->srb_queued=1;
488
489 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
490 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
491
492 t = jiffies ;
493
494 add_wait_queue(&olympic_priv->srb_wait,&wait) ;
495 set_current_state(TASK_INTERRUPTIBLE) ;
496
497 while(olympic_priv->srb_queued) {
498 schedule() ;
499 if(signal_pending(current)) {
500 printk(KERN_WARNING "%s: Signal received in open.\n",
501 dev->name);
502 printk(KERN_WARNING "SISR=%x LISR=%x\n",
503 readl(olympic_mmio+SISR),
504 readl(olympic_mmio+LISR));
505 olympic_priv->srb_queued=0;
506 break;
507 }
508 if ((jiffies-t) > 10*HZ) {
509 printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
510 olympic_priv->srb_queued=0;
511 break ;
512 }
513 set_current_state(TASK_INTERRUPTIBLE) ;
514 }
515 remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
516 set_current_state(TASK_RUNNING) ;
517 olympic_priv->srb_queued = 0 ;
518 #if OLYMPIC_DEBUG
519 printk("init_srb(%p): ",init_srb);
520 for(i=0;i<20;i++)
521 printk("%02x ",readb(init_srb+i));
522 printk("\n");
523 #endif
524
525 /* If we get the same return response as we set, the interrupt wasn't raised and the open
526 * timed out.
527 */
528
529 if(readb(init_srb+2)== OLYMPIC_CLEAR_RET_CODE) {
530 printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
531 return -EIO ;
532 }
533
534 if(readb(init_srb+2)!=0) {
535 if (readb(init_srb+2) == 0x07) {
536 if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
537 printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
538 open_finished = 0 ;
539 } else {
540
541 strcpy(open_error, open_maj_error[(readb(init_srb+7) & 0xf0) >> 4]) ;
542 strcat(open_error," - ") ;
543 strcat(open_error, open_min_error[(readb(init_srb+7) & 0x0f)]) ;
544
545 if (!olympic_priv->olympic_ring_speed && ((readb(init_srb+7) & 0x0f) == 0x0d)) {
546 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
547 printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
548 free_irq(dev->irq, dev);
549 return -EIO ;
550 }
551
552 printk(KERN_WARNING "%s: %s\n",dev->name,open_error);
553 free_irq(dev->irq,dev) ;
554 return -EIO ;
555
556 } /* if autosense && open_finished */
557 } else if (init_srb[2] == 0x32) {
558 printk(KERN_WARNING "%s: Invalid LAA: %02x:%02x:%02x:%02x:%02x:%02x\n",
559 dev->name,
560 olympic_priv->olympic_laa[0],
561 olympic_priv->olympic_laa[1],
562 olympic_priv->olympic_laa[2],
563 olympic_priv->olympic_laa[3],
564 olympic_priv->olympic_laa[4],
565 olympic_priv->olympic_laa[5]) ;
566 free_irq(dev->irq,dev) ;
567 return -EIO ;
568 } else {
569 printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name,init_srb[2]);
570 free_irq(dev->irq, dev);
571 return -EIO;
572 }
573 } else
574 open_finished = 1 ;
575 } while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
576
577 if (readb(init_srb+18) & (1<<3))
578 if (olympic_priv->olympic_message_level)
579 printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);
580
581 if (readb(init_srb+18) & (1<<1))
582 olympic_priv->olympic_ring_speed = 100 ;
583 else if (readb(init_srb+18) & 1)
584 olympic_priv->olympic_ring_speed = 16 ;
585 else
586 olympic_priv->olympic_ring_speed = 4 ;
587
588 if (olympic_priv->olympic_message_level)
589 printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);
590
591 olympic_priv->asb = swab16(readw(init_srb+8));
592 olympic_priv->srb = swab16(readw(init_srb+10));
593 olympic_priv->arb = swab16(readw(init_srb+12));
594 olympic_priv->trb = swab16(readw(init_srb+16));
595
596 olympic_priv->olympic_receive_options = 0x01 ;
597 olympic_priv->olympic_copy_all_options = 0 ;
598
599 /* setup rx ring */
600
601 writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */
602
603 writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this the enables RX channel */
604
605 for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
606
607 struct sk_buff *skb;
608
609 skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
610 if(skb == NULL)
611 break;
612
613 skb->dev = dev;
614
615 olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
616 skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
617 olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
618 olympic_priv->rx_ring_skb[i]=skb;
619 }
620
621 if (i==0) {
622 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
623 free_irq(dev->irq, dev);
624 return -EIO;
625 }
626
627 olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
628 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
629 writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
630 writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
631 writew(i, olympic_mmio+RXDESCQCNT);
632
633 olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
634 sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
635 writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
636 writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);
637
638 olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1; /* last processed rx status */
639 olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;
640
641 writew(i, olympic_mmio+RXSTATQCNT);
642
643 #if OLYMPIC_DEBUG
644 printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
645 printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
646 printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
647 printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
648 printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );
649
650 printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
651 printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
652 #endif
653
654 writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);
655
656 #if OLYMPIC_DEBUG
657 printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
658 printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
659 printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
660 #endif
661
662 writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);
663
664 /* setup tx ring */
665
666 writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
667 for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
668 olympic_priv->olympic_tx_ring[i].buffer=0xdeadbeef;
669
670 olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
671 olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
672 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
673 writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
674 writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
675 writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);
676
677 olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
678 sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
679 writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
680 writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
681 writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);
682
683 olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
684 olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */
685
686 writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
687 writel(0,olympic_mmio+EISR) ;
688 writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
689 writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);
690
691 #if OLYMPIC_DEBUG
692 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
693 printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
694 #endif
695
696 if (olympic_priv->olympic_network_monitor) {
697 u8 *oat ;
698 u8 *opt ;
699 oat = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
700 opt = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
701
702 printk("%s: Node Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
703 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
704 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
705 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
706 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
707 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
708 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5));
709 printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
710 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
711 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
712 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
713 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
714 printk("%s: NAUN Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
715 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
716 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
717 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
718 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
719 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
720 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5));
721 }
722
723 netif_start_queue(dev);
724 return 0;
725
726 }
727
728 /*
729 * When we enter the rx routine we do not know how many frames have been
730 * queued on the rx channel. Therefore we start at the next rx status
731 * position and travel around the receive ring until we have completed
732 * all the frames.
733 *
734 * This means that we may process the frame before we receive the end
735 * of frame interrupt. This is why we always test the status instead
736 * of blindly processing the next frame.
737 *
738 * We also remove the last 4 bytes from the packet as well, these are
739 * just token ring trailer info and upset protocols that don't check
740 * their own length, i.e. SNA.
741 *
742 */
olympic_rx(struct net_device * dev)743 static void olympic_rx(struct net_device *dev)
744 {
745 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
746 u8 *olympic_mmio=olympic_priv->olympic_mmio;
747 struct olympic_rx_status *rx_status;
748 struct olympic_rx_desc *rx_desc ;
749 int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
750 struct sk_buff *skb, *skb2;
751 int i;
752
753 rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;
754
755 while (rx_status->status_buffercnt) {
756 u32 l_status_buffercnt;
757
758 olympic_priv->rx_status_last_received++ ;
759 olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
760 #if OLYMPIC_DEBUG
761 printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
762 #endif
763 length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
764 buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
765 i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
766 frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;
767
768 #if OLYMPIC_DEBUG
769 printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
770 #endif
771 l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
772 if(l_status_buffercnt & 0xC0000000) {
773 if (l_status_buffercnt & 0x3B000000) {
774 if (olympic_priv->olympic_message_level) {
775 if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
776 printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
777 if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
778 printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
779 if (l_status_buffercnt & (1<<27)) /* No receive buffers */
780 printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
781 if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
782 printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
783 if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
784 printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
785 }
786 olympic_priv->rx_ring_last_received += i ;
787 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
788 olympic_priv->olympic_stats.rx_errors++;
789 } else {
790
791 if (buffer_cnt == 1) {
792 skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
793 } else {
794 skb = dev_alloc_skb(length) ;
795 }
796
797 if (skb == NULL) {
798 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
799 olympic_priv->olympic_stats.rx_dropped++ ;
800 /* Update counters even though we don't transfer the frame */
801 olympic_priv->rx_ring_last_received += i ;
802 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
803 } else {
804 skb->dev = dev ;
805
806 /* Optimise based upon number of buffers used.
807 If only one buffer is used we can simply swap the buffers around.
808 If more than one then we must use the new buffer and copy the information
809 first. Ideally all frames would be in a single buffer, this can be tuned by
810 altering the buffer size. If the length of the packet is less than
811 1500 bytes we're going to copy it over anyway to stop packets getting
812 dropped from sockets with buffers smaller than our pkt_buf_sz. */
813
814 if (buffer_cnt==1) {
815 olympic_priv->rx_ring_last_received++ ;
816 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
817 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
818 if (length > 1500) {
819 skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
820 /* unmap buffer */
821 pci_unmap_single(olympic_priv->pdev,
822 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
823 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
824 skb_put(skb2,length-4);
825 skb2->protocol = tr_type_trans(skb2,dev);
826 olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
827 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
828 olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
829 olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
830 cpu_to_le32(olympic_priv->pkt_buf_sz);
831 olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
832 netif_rx(skb2) ;
833 } else {
834 pci_dma_sync_single(olympic_priv->pdev,
835 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
836 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
837 memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ;
838 skb->protocol = tr_type_trans(skb,dev) ;
839 netif_rx(skb) ;
840 }
841 } else {
842 do { /* Walk the buffers */
843 olympic_priv->rx_ring_last_received++ ;
844 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
845 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
846 pci_dma_sync_single(olympic_priv->pdev,
847 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
848 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
849 rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
850 cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
851 memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ;
852 } while (--i) ;
853 skb_trim(skb,skb->len-4) ;
854 skb->protocol = tr_type_trans(skb,dev);
855 netif_rx(skb) ;
856 }
857 dev->last_rx = jiffies ;
858 olympic_priv->olympic_stats.rx_packets++ ;
859 olympic_priv->olympic_stats.rx_bytes += length ;
860 } /* if skb == null */
861 } /* If status & 0x3b */
862
863 } else { /*if buffercnt & 0xC */
864 olympic_priv->rx_ring_last_received += i ;
865 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
866 }
867
868 rx_status->fragmentcnt_framelen = 0 ;
869 rx_status->status_buffercnt = 0 ;
870 rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);
871
872 writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
873 } /* while */
874
875 }
876
olympic_freemem(struct net_device * dev)877 static void olympic_freemem(struct net_device *dev)
878 {
879 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
880 int i;
881
882 for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
883 dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
884 if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
885 pci_unmap_single(olympic_priv->pdev,
886 le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
887 olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
888 }
889 olympic_priv->rx_status_last_received++;
890 olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
891 }
892 /* unmap rings */
893 pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
894 sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
895 pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
896 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
897
898 pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
899 sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
900 pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
901 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
902
903 return ;
904 }
905
/*
 * olympic_interrupt - main interrupt service routine.
 *
 * Dispatches on the SISR status bits: SRB replies, tx completions,
 * rx status, adapter checks, ASB/ARB/TRB events.  Runs under
 * olympic_lock for everything after the initial SISR read.
 */
static void olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev= (struct net_device *)dev_id;
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	u8 *olympic_mmio=olympic_priv->olympic_mmio;
	u32 sisr;
	u8 *adapter_check_area ;

	/*
	 *  Read sisr but don't reset it yet.
	 *  The indication bit may have been set but the interrupt latch
	 *  bit may not be set, so we'd lose the interrupt later.
	 */
	sisr=readl(olympic_mmio+SISR) ;
	if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
		return ;
	sisr=readl(olympic_mmio+SISR_RR) ;  /* Read & Reset sisr */

	spin_lock(&olympic_priv->olympic_lock);

	/* Hotswap gives us this on removal */
	if (sisr == 0xffffffff) {
		printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
		/* Card is gone: tear down DMA state; dev->stop is cleared so
		 * a later close won't touch the vanished hardware. */
		olympic_freemem(dev) ;
		free_irq(dev->irq, dev) ;
		dev->stop = NULL ;
		spin_unlock(&olympic_priv->olympic_lock) ;
		return ;
	}

	if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
			SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {

		/* If we ever get this the adapter is seriously dead. Only a reset is going to
		 * bring it back to life. We're talking pci bus errors and such like :( */
		if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
			printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
			printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
			printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
			printk(KERN_ERR "or the linux-tr mailing list.\n") ;
			olympic_freemem(dev) ;
			free_irq(dev->irq, dev) ;
			dev->stop = NULL ;
			spin_unlock(&olympic_priv->olympic_lock) ;
			return ;
		} /* SISR_ERR */

		if(sisr & SISR_SRB_REPLY) {
			/* srb_queued==1 means a task is sleeping on the reply
			 * (e.g. close); srb_queued==2 means handle it here in
			 * the bottom-half helper. */
			if(olympic_priv->srb_queued==1) {
				wake_up_interruptible(&olympic_priv->srb_wait);
			} else if (olympic_priv->srb_queued==2) {
				olympic_srb_bh(dev) ;
			}
			olympic_priv->srb_queued=0;
		} /* SISR_SRB_REPLY */

		/* We shouldn't ever miss the Tx interrupt, but the you never know, hence the loop to ensure
		   we get all tx completions. */
		if (sisr & SISR_TX1_EOF) {
			while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
				olympic_priv->tx_ring_last_status++;
				olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
				olympic_priv->free_tx_ring_entries++;
				/* Account the skb length before unmapping/freeing it. */
				olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
				olympic_priv->olympic_stats.tx_packets++ ;
				pci_unmap_single(olympic_priv->pdev,
					le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
					olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
				/* Poison the descriptor so freemem knows it is unmapped. */
				olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=0xdeadbeef;
				olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
			}
			netif_wake_queue(dev);
		} /* SISR_TX1_EOF */

		if (sisr & SISR_RX_STATUS) {
			olympic_rx(dev);
		} /* SISR_RX_STATUS */

		if (sisr & SISR_ADAPTER_CHECK) {
			netif_stop_queue(dev);
			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
			/* Point the lap window at the check area and dump it. */
			writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
			adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
			printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
			olympic_freemem(dev) ;
			free_irq(dev->irq, dev) ;
			dev->stop = NULL ;
			spin_unlock(&olympic_priv->olympic_lock) ;
			return ;
		} /* SISR_ADAPTER_CHECK */

		if (sisr & SISR_ASB_FREE) {
			/* Wake up anything that is waiting for the asb response */
			if (olympic_priv->asb_queued) {
				olympic_asb_bh(dev) ;
			}
		} /* SISR_ASB_FREE */

		if (sisr & SISR_ARB_CMD) {
			olympic_arb_cmd(dev) ;
		} /* SISR_ARB_CMD */

		if (sisr & SISR_TRB_REPLY) {
			/* Wake up anything that is waiting for the trb response */
			if (olympic_priv->trb_queued) {
				wake_up_interruptible(&olympic_priv->trb_wait);
			}
			olympic_priv->trb_queued = 0 ;
		} /* SISR_TRB_REPLY */

		if (sisr & SISR_RX_NOBUF) {
			/* According to the documentation, we don't have to do anything, but trapping it keeps it out of
	           	   /var/log/messages.  */
		} /* SISR_RX_NOBUF */
	} else {
		printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
		printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
	} /* One if the interrupts we want */
	/* Re-enable adapter interrupts on the way out. */
	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);

	spin_unlock(&olympic_priv->olympic_lock) ;
}
1029
olympic_xmit(struct sk_buff * skb,struct net_device * dev)1030 static int olympic_xmit(struct sk_buff *skb, struct net_device *dev)
1031 {
1032 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1033 u8 *olympic_mmio=olympic_priv->olympic_mmio;
1034 unsigned long flags ;
1035
1036 spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
1037
1038 netif_stop_queue(dev);
1039
1040 if(olympic_priv->free_tx_ring_entries) {
1041 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
1042 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
1043 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
1044 olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
1045 olympic_priv->free_tx_ring_entries--;
1046
1047 olympic_priv->tx_ring_free++;
1048 olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
1049 writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
1050 netif_wake_queue(dev);
1051 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1052 return 0;
1053 } else {
1054 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1055 return 1;
1056 }
1057
1058 }
1059
1060
/*
 * olympic_close - shut the adapter down.
 *
 * Issues SRB_CLOSE_ADAPTER, sleeps (interruptibly, up to 60s) for the
 * SRB reply, then frees DMA resources, resets the fifos/busmaster
 * logic and releases the irq.
 */
static int olympic_close(struct net_device *dev)
{
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	u8 *olympic_mmio=olympic_priv->olympic_mmio,*srb;
	unsigned long t,flags;

	DECLARE_WAITQUEUE(wait,current) ;

	netif_stop_queue(dev);

	/* Window the shared-RAM SRB area in and build the close command. */
	writel(olympic_priv->srb,olympic_mmio+LAPA);
	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

	writeb(SRB_CLOSE_ADAPTER,srb+0);
	writeb(0,srb+1);
	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);

	/* srb_queued==1: the ISR will wake us on SISR_SRB_REPLY. */
	spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
	olympic_priv->srb_queued=1;

	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
	spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);

	t = jiffies ;

	/* Classic sleep/wake sequence: queue ourselves, set the task state
	 * BEFORE testing the condition, then schedule.  Order matters to
	 * avoid losing the wakeup from the interrupt handler. */
	add_wait_queue(&olympic_priv->srb_wait,&wait) ;
	set_current_state(TASK_INTERRUPTIBLE) ;

	while(olympic_priv->srb_queued) {
		schedule() ;
		if(signal_pending(current))	{
			printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
			printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
			olympic_priv->srb_queued=0;
			break;
		}
		/* Give up after ~60s even without a signal; may not be fatal. */
		if ((jiffies-t) > 60*HZ) {
			printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
			olympic_priv->srb_queued=0;
			break ;
		}
		set_current_state(TASK_INTERRUPTIBLE) ;
	}
	remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
	set_current_state(TASK_RUNNING) ;

	olympic_priv->rx_status_last_received++;
	olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;

	olympic_freemem(dev) ;

	/* reset tx/rx fifo's and busmaster logic */

	writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
	udelay(1);
	writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);

#if OLYMPIC_DEBUG
	{
	int i ;
	printk("srb(%p): ",srb);
	for(i=0;i<4;i++)
		printk("%x ",readb(srb+i));
	printk("\n");
	}
#endif
	free_irq(dev->irq,dev);

	return 0;

}
1132
olympic_set_rx_mode(struct net_device * dev)1133 static void olympic_set_rx_mode(struct net_device *dev)
1134 {
1135 struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1136 u8 *olympic_mmio = olympic_priv->olympic_mmio ;
1137 u8 options = 0;
1138 u8 *srb;
1139 struct dev_mc_list *dmi ;
1140 unsigned char dev_mc_address[4] ;
1141 int i ;
1142
1143 writel(olympic_priv->srb,olympic_mmio+LAPA);
1144 srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1145 options = olympic_priv->olympic_copy_all_options;
1146
1147 if (dev->flags&IFF_PROMISC)
1148 options |= 0x61 ;
1149 else
1150 options &= ~0x61 ;
1151
1152 /* Only issue the srb if there is a change in options */
1153
1154 if ((options ^ olympic_priv->olympic_copy_all_options)) {
1155
1156 /* Now to issue the srb command to alter the copy.all.options */
1157
1158 writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
1159 writeb(0,srb+1);
1160 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1161 writeb(0,srb+3);
1162 writeb(olympic_priv->olympic_receive_options,srb+4);
1163 writeb(options,srb+5);
1164
1165 olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1166
1167 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1168
1169 olympic_priv->olympic_copy_all_options = options ;
1170
1171 return ;
1172 }
1173
1174 /* Set the functional addresses we need for multicast */
1175
1176 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1177
1178 for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
1179 dev_mc_address[0] |= dmi->dmi_addr[2] ;
1180 dev_mc_address[1] |= dmi->dmi_addr[3] ;
1181 dev_mc_address[2] |= dmi->dmi_addr[4] ;
1182 dev_mc_address[3] |= dmi->dmi_addr[5] ;
1183 }
1184
1185 writeb(SRB_SET_FUNC_ADDRESS,srb+0);
1186 writeb(0,srb+1);
1187 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1188 writeb(0,srb+3);
1189 writeb(0,srb+4);
1190 writeb(0,srb+5);
1191 writeb(dev_mc_address[0],srb+6);
1192 writeb(dev_mc_address[1],srb+7);
1193 writeb(dev_mc_address[2],srb+8);
1194 writeb(dev_mc_address[3],srb+9);
1195
1196 olympic_priv->srb_queued = 2 ;
1197 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1198
1199 }
1200
olympic_srb_bh(struct net_device * dev)1201 static void olympic_srb_bh(struct net_device *dev)
1202 {
1203 struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1204 u8 *olympic_mmio = olympic_priv->olympic_mmio ;
1205 u8 *srb;
1206
1207 writel(olympic_priv->srb,olympic_mmio+LAPA);
1208 srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1209
1210 switch (readb(srb)) {
1211
1212 /* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
1213 * At some point we should do something if we get an error, such as
1214 * resetting the IFF_PROMISC flag in dev
1215 */
1216
1217 case SRB_MODIFY_RECEIVE_OPTIONS:
1218 switch (readb(srb+2)) {
1219 case 0x01:
1220 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
1221 break ;
1222 case 0x04:
1223 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
1224 break ;
1225 default:
1226 if (olympic_priv->olympic_message_level)
1227 printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
1228 break ;
1229 } /* switch srb[2] */
1230 break ;
1231
1232 /* SRB_SET_GROUP_ADDRESS - Multicast group setting
1233 */
1234
1235 case SRB_SET_GROUP_ADDRESS:
1236 switch (readb(srb+2)) {
1237 case 0x00:
1238 break ;
1239 case 0x01:
1240 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1241 break ;
1242 case 0x04:
1243 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
1244 break ;
1245 case 0x3c:
1246 printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
1247 break ;
1248 case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
1249 printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
1250 break ;
1251 case 0x55:
1252 printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
1253 break ;
1254 default:
1255 break ;
1256 } /* switch srb[2] */
1257 break ;
1258
1259 /* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
1260 */
1261
1262 case SRB_RESET_GROUP_ADDRESS:
1263 switch (readb(srb+2)) {
1264 case 0x00:
1265 break ;
1266 case 0x01:
1267 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1268 break ;
1269 case 0x04:
1270 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1271 break ;
1272 case 0x39: /* Must deal with this if individual multicast addresses used */
1273 printk(KERN_INFO "%s: Group address not found \n",dev->name);
1274 break ;
1275 default:
1276 break ;
1277 } /* switch srb[2] */
1278 break ;
1279
1280
1281 /* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
1282 */
1283
1284 case SRB_SET_FUNC_ADDRESS:
1285 switch (readb(srb+2)) {
1286 case 0x00:
1287 if (olympic_priv->olympic_message_level)
1288 printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
1289 break ;
1290 case 0x01:
1291 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1292 break ;
1293 case 0x04:
1294 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1295 break ;
1296 default:
1297 break ;
1298 } /* switch srb[2] */
1299 break ;
1300
1301 /* SRB_READ_LOG - Read and reset the adapter error counters
1302 */
1303
1304 case SRB_READ_LOG:
1305 switch (readb(srb+2)) {
1306 case 0x00:
1307 if (olympic_priv->olympic_message_level)
1308 printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
1309 break ;
1310 case 0x01:
1311 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1312 break ;
1313 case 0x04:
1314 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1315 break ;
1316
1317 } /* switch srb[2] */
1318 break ;
1319
1320 /* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */
1321
1322 case SRB_READ_SR_COUNTERS:
1323 switch (readb(srb+2)) {
1324 case 0x00:
1325 if (olympic_priv->olympic_message_level)
1326 printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
1327 break ;
1328 case 0x01:
1329 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1330 break ;
1331 case 0x04:
1332 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1333 break ;
1334 default:
1335 break ;
1336 } /* switch srb[2] */
1337 break ;
1338
1339 default:
1340 printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
1341 break ;
1342 } /* switch srb[0] */
1343
1344 }
1345
olympic_get_stats(struct net_device * dev)1346 static struct net_device_stats * olympic_get_stats(struct net_device *dev)
1347 {
1348 struct olympic_private *olympic_priv ;
1349 olympic_priv=(struct olympic_private *) dev->priv;
1350 return (struct net_device_stats *) &olympic_priv->olympic_stats;
1351 }
1352
olympic_set_mac_address(struct net_device * dev,void * addr)1353 static int olympic_set_mac_address (struct net_device *dev, void *addr)
1354 {
1355 struct sockaddr *saddr = addr ;
1356 struct olympic_private *olympic_priv = (struct olympic_private *)dev->priv ;
1357
1358 if (netif_running(dev)) {
1359 printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
1360 return -EIO ;
1361 }
1362
1363 memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;
1364
1365 if (olympic_priv->olympic_message_level) {
1366 printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
1367 olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
1368 olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
1369 olympic_priv->olympic_laa[5]);
1370 }
1371
1372 return 0 ;
1373 }
1374
/*
 * olympic_arb_cmd - service an ARB (adapter request block) command.
 *
 * Two commands are handled: ARB_RECEIVE_DATA (a MAC frame the adapter
 * wants us to take, delivered through a chain of shared-RAM buffers)
 * and ARB_LAN_CHANGE_STATUS (ring status change notification).
 */
static void olympic_arb_cmd(struct net_device *dev)
{
	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
	u8 *olympic_mmio=olympic_priv->olympic_mmio;
	u8 *arb_block, *asb_block, *srb  ;
	u8 header_len ;
	u16 frame_len, buffer_len ;
	struct sk_buff *mac_frame ;
	u8 *buf_ptr ;
	u8 *frame_data ;
	u16 buff_off ;
	u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
	u8 fdx_prot_error ;
	u16 next_ptr;
	int i ;

	arb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->arb) ;
	asb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->asb) ;
	srb = (u8 *)(olympic_priv->olympic_lap + olympic_priv->srb) ;

	if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */

		header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
		/* 16-bit fields in the arb are big-endian in shared RAM; swab16
		 * after readw yields host order here (see the driver changelog
		 * note about PPC endian handling). */
		frame_len = swab16(readw(arb_block + 10)) ;

		buff_off = swab16(readw(arb_block + 6)) ;

		buf_ptr = olympic_priv->olympic_lap + buff_off ;

#if OLYMPIC_DEBUG
	{
		int i;
		frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;

		for (i=0 ;  i < 14 ; i++) {
			printk("Loc %d = %02x\n",i,readb(frame_data + i));
		}

		printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
}
#endif
		mac_frame = dev_alloc_skb(frame_len) ;
		if (!mac_frame) {
			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
			goto drop_frame;
		}

		/* Walk the buffer chain, creating the frame */

		do {
			frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
			buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
			memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
			next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
			/* NOTE(review): ntohs here vs swab16 above — equivalent on
			 * little-endian hosts but not on big-endian; looks
			 * inconsistent, confirm against PPC behaviour. */
		} while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + ntohs(next_ptr)));

		if (olympic_priv->olympic_network_monitor) {
			struct trh_hdr *mac_hdr ;
			printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ;
			mac_hdr = (struct trh_hdr *)mac_frame->data ;
			printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ;
			printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ;
		}
		mac_frame->dev = dev ;
		mac_frame->protocol = tr_type_trans(mac_frame,dev);
		netif_rx(mac_frame) ;
		dev->last_rx = jiffies;

drop_frame:
		/* Now tell the card we have dealt with the received frame */

		/* Set LISR Bit 1 */
		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);

		/* Is the ASB free ? */

		if (readb(asb_block + 2) != 0xff) {
			/* ASB busy: defer the reply to olympic_asb_bh. */
			olympic_priv->asb_queued = 1 ;
			writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
			return ;
			/* Drop out and wait for the bottom half to be run */
		}

		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);

		olympic_priv->asb_queued = 2 ;

		return ;

	} else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
		lan_status = swab16(readw(arb_block+6));
		fdx_prot_error = readb(arb_block+8) ;

		/* Issue ARB Free */
		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);

		/* Only newly-set/cleared bits are of interest. */
		lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;

		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
			if (lan_status_diff & LSC_LWF)
					printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
			if (lan_status_diff & LSC_ARW)
					printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
			if (lan_status_diff & LSC_FPE)
					printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
			if (lan_status_diff & LSC_RR)
					printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);

			/* Adapter has been closed by the hardware */

			/* reset tx/rx fifo's and busmaster logic */

			writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
			udelay(1);
			writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
			netif_stop_queue(dev);
			olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
			/* Release every rx skb and DMA mapping; same teardown as
			 * olympic_freemem, done inline here in irq context.
			 * NOTE(review): the 0xdeadbeef compare is against a raw
			 * constant while .buffer is stored little-endian —
			 * suspect on big-endian hosts, confirm. */
			for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
				dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
				if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
					pci_unmap_single(olympic_priv->pdev,
						le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
						olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
				}
				olympic_priv->rx_status_last_received++;
				olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
			}
			/* unmap rings */
			pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
				sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
			pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
				sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);

			pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
				sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
			pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
				sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);

			free_irq(dev->irq,dev);
			dev->stop=NULL;
			printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
		} /* If serious error */

		if (olympic_priv->olympic_message_level) {
			if (lan_status_diff & LSC_SIG_LOSS)
					printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
			if (lan_status_diff & LSC_HARD_ERR)
					printk(KERN_INFO "%s: Beaconing \n",dev->name);
			if (lan_status_diff & LSC_SOFT_ERR)
					printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
			if (lan_status_diff & LSC_TRAN_BCN)
					printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
			if (lan_status_diff & LSC_SS)
					printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
			if (lan_status_diff & LSC_RING_REC)
					printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
			if (lan_status_diff & LSC_FDX_MODE)
					printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
		}

		if (lan_status_diff & LSC_CO) {

				if (olympic_priv->olympic_message_level)
					printk(KERN_INFO "%s: Counter Overflow \n", dev->name);

				/* Issue READ.LOG command */

				writeb(SRB_READ_LOG, srb);
				writeb(0,srb+1);
				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
				writeb(0,srb+3);
				writeb(0,srb+4);
				writeb(0,srb+5);

				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		}

		if (lan_status_diff & LSC_SR_CO) {

				if (olympic_priv->olympic_message_level)
					printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);

				/* Issue a READ.SR.COUNTERS */

				writeb(SRB_READ_SR_COUNTERS,srb);
				writeb(0,srb+1);
				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
				writeb(0,srb+3);

				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */

				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);

		}

		olympic_priv->olympic_lan_status = lan_status ;

	}  /* Lan.change.status */
	else
		printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
}
1584
olympic_asb_bh(struct net_device * dev)1585 static void olympic_asb_bh(struct net_device *dev)
1586 {
1587 struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1588 u8 *arb_block, *asb_block ;
1589
1590 arb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->arb) ;
1591 asb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->asb) ;
1592
1593 if (olympic_priv->asb_queued == 1) { /* Dropped through the first time */
1594
1595 writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
1596 writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
1597 writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
1598 writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
1599
1600 writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1601 olympic_priv->asb_queued = 2 ;
1602
1603 return ;
1604 }
1605
1606 if (olympic_priv->asb_queued == 2) {
1607 switch (readb(asb_block+2)) {
1608 case 0x01:
1609 printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
1610 break ;
1611 case 0x26:
1612 printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
1613 break ;
1614 case 0xFF:
1615 /* Valid response, everything should be ok again */
1616 break ;
1617 default:
1618 printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
1619 break ;
1620 }
1621 }
1622 olympic_priv->asb_queued = 0 ;
1623 }
1624
olympic_change_mtu(struct net_device * dev,int mtu)1625 static int olympic_change_mtu(struct net_device *dev, int mtu)
1626 {
1627 struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
1628 u16 max_mtu ;
1629
1630 if (olympic_priv->olympic_ring_speed == 4)
1631 max_mtu = 4500 ;
1632 else
1633 max_mtu = 18000 ;
1634
1635 if (mtu > max_mtu)
1636 return -EINVAL ;
1637 if (mtu < 100)
1638 return -EINVAL ;
1639
1640 dev->mtu = mtu ;
1641 olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;
1642
1643 return 0 ;
1644 }
1645
/*
 * read_proc handler for the per-adapter /proc/net/olympic_<dev> entry.
 *
 * Dumps the adapter address table and the token ring parameter table,
 * reading each field directly out of the LAP memory window with
 * readb()/readw().  16-bit fields are swab16()ed because the adapter
 * tables are big-endian (see the endianness note in the file header).
 *
 * The whole report is regenerated on every call (classic single-shot
 * read_proc pattern); offset/length windowing is applied afterwards.
 *
 * Fixes over the original: *eof is now flagged once the data has been
 * produced, and len is clamped so a caller-supplied offset beyond the
 * report can never yield a negative return value.
 */
static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	struct net_device *dev = (struct net_device *)data ;
	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	u8 *oat = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
	u8 *opt = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
	int size = 0 ;
	int len=0;
	off_t begin=0;
	off_t pos=0;

	size = sprintf(buffer,
		"IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
	size += sprintf(buffer+size, "\n%6s: Adapter Address : Node Address : Functional Addr\n",
 	   dev->name);

	size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x\n",
	   dev->name,
	   dev->dev_addr[0],
	   dev->dev_addr[1],
	   dev->dev_addr[2],
	   dev->dev_addr[3],
	   dev->dev_addr[4],
	   dev->dev_addr[5],
	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));

	size += sprintf(buffer+size, "\n%6s: Token Ring Parameters Table:\n", dev->name);

	size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n",
	   dev->name) ;

	size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x :  %04x :\n",
	   dev->name,
	   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
	   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
	   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
	   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5),
	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)),
	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+1),
	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+2),
	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+3),
	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+4),
	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+5),
	   swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
	   swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
	   swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));

	size += sprintf(buffer+size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
	   dev->name) ;

	size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %04x  : %04x   : %04x   : %04x   : %04x    :  %04x         : \n",
	   dev->name,
	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)),
	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+1),
	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+2),
	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+3),
	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+4),
	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+5),
	   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
	   swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
	   swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
	   swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
	   swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
	   swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));

	size += sprintf(buffer+size, "%6s: Beacon Details :  Tx  :  Rx  : NAUN Node Address : NAUN Node Phys : \n",
	   dev->name) ;

	size += sprintf(buffer+size, "%6s:                :  %02x  :  %02x  : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x    : \n",
	   dev->name,
	   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
	   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)),
	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+1),
	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+2),
	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+3),
	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+4),
	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+5),
	   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
	   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
	   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
	   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));

	/* All data produced in one pass: signal EOF explicitly instead of
	 * relying solely on an eventual zero-length return. */
	*eof = 1;

	len=size;
	pos=begin+size;
	if (pos<offset) {
		len=0;
		begin=pos;
	}
	*start=buffer+(offset-begin);	/* Start of wanted data */
	len-=(offset-begin);		/* Start slop */
	if(len>length)
		len=length;		/* Ending slop */
	if (len < 0)
		len = 0;		/* Guard: offset past end of data */
	return len;
}
1756
olympic_remove_one(struct pci_dev * pdev)1757 static void __devexit olympic_remove_one(struct pci_dev *pdev)
1758 {
1759 struct net_device *dev = pci_get_drvdata(pdev) ;
1760 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1761
1762 if (olympic_priv->olympic_network_monitor) {
1763 char proc_name[20] ;
1764 strcpy(proc_name,"net/olympic_") ;
1765 strcat(proc_name,dev->name) ;
1766 remove_proc_entry(proc_name,NULL);
1767 }
1768 unregister_trdev(dev) ;
1769 iounmap(olympic_priv->olympic_mmio) ;
1770 iounmap(olympic_priv->olympic_lap) ;
1771 pci_release_regions(pdev) ;
1772 pci_set_drvdata(pdev,NULL) ;
1773 kfree(dev) ;
1774 }
1775
1776 static struct pci_driver olympic_driver = {
1777 name: "olympic",
1778 id_table: olympic_pci_tbl,
1779 probe: olympic_probe,
1780 remove: __devexit_p(olympic_remove_one),
1781 };
1782
/* Module init: hand the driver table to the PCI core, which will probe
 * any matching adapters.  Return value comes straight from
 * pci_module_init() (presumably 0 on success, negative errno on
 * failure -- per the 2.4-era PCI API). */
static int __init olympic_pci_init(void) 
{
	return pci_module_init (&olympic_driver) ; 
}
1787
olympic_pci_cleanup(void)1788 static void __exit olympic_pci_cleanup(void)
1789 {
1790 return pci_unregister_driver(&olympic_driver) ;
1791 }
1792
1793
/* Wire the init/exit routines into the module loader and declare the
 * license so the kernel does not mark itself tainted on load. */
module_init(olympic_pci_init) ; 
module_exit(olympic_pci_cleanup) ; 

MODULE_LICENSE("GPL");
1798