1 /*
2 * rrunner.c: Linux driver for the Essential RoadRunner HIPPI board.
3 *
4 * Copyright (C) 1998-2000 by Jes Sorensen, <Jes.Sorensen@cern.ch>.
5 *
6 * Thanks to Essential Communication for providing us with hardware
7 * and very comprehensive documentation without which I would not have
8 * been able to write this driver. A special thank you to John Gibbon
9 * for sorting out the legal issues, with the NDA, allowing the code to
10 * be released under the GPL.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * Thanks to Jayaram Bhat from ODS/Essential for fixing some of the
18 * stupid bugs in my code.
19 *
20 * Softnet support and various other patches from Val Henson of
21 * ODS/Essential.
22 */
23
24 #define DEBUG 1
25 #define RX_DMA_SKBUFF 1
26 #define PKT_COPY_THRESHOLD 512
27
28 #include <linux/config.h>
29 #include <linux/module.h>
30 #include <linux/version.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/ioport.h>
34 #include <linux/pci.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/hippidevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/init.h>
40 #include <linux/delay.h>
41 #include <linux/mm.h>
42 #include <net/sock.h>
43
44 #include <asm/system.h>
45 #include <asm/cache.h>
46 #include <asm/byteorder.h>
47 #include <asm/io.h>
48 #include <asm/irq.h>
49 #include <asm/uaccess.h>
50
51 #if (LINUX_VERSION_CODE < 0x02030e)
52 #define net_device device
53 #endif
54
55 #if (LINUX_VERSION_CODE >= 0x02031b)
56 #define NEW_NETINIT
57 #endif
58
59 #if (LINUX_VERSION_CODE < 0x02032b)
60 /*
61 * SoftNet changes
62 */
63 #define dev_kfree_skb_irq(a) dev_kfree_skb(a)
64 #define netif_wake_queue(dev) clear_bit(0, &dev->tbusy)
65 #define netif_stop_queue(dev) set_bit(0, &dev->tbusy)
66
static inline void netif_start_queue(struct net_device *dev)
68 {
69 dev->tbusy = 0;
70 dev->start = 1;
71 }
72
73 #define rr_mark_net_bh(foo) mark_bh(foo)
74 #define rr_if_busy(dev) dev->tbusy
75 #define rr_if_running(dev) dev->start /* Currently unused. */
76 #define rr_if_down(dev) do { dev->start = 0; } while (0)
77 #else
78 #define NET_BH 0
79 #define rr_mark_net_bh(foo) do { } while(0)
80 #define rr_if_busy(dev) netif_queue_stopped(dev)
81 #define rr_if_running(dev) netif_running(dev)
82 #define rr_if_down(dev) do { } while(0)
83 #endif
84
85 #include "rrunner.h"
86
87 #define RUN_AT(x) (jiffies + (x))
88
89
90 /*
91 * Implementation notes:
92 *
 * The DMA engine only allows for DMA within physical 64KB chunks of
 * memory. The current approach of the driver (and stack) is to use
 * linear blocks of memory for the skbuffs. However, the data block is
 * always the first part of the skb, and skbs are 2^n aligned, so we
 * are guaranteed to get the whole block within one 64KB-aligned 64KB
 * chunk.
 *
 * In the long term, relying on being able to allocate 64KB linear
 * chunks of memory is not feasible, and the skb handling code and the
 * stack will need to know about I/O vectors or something similar.
103 */
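
/*
 * Illustrative sketch only (not used by the driver): one way to check
 * that a linear buffer stays within a single 64KB-aligned 64KB chunk,
 * i.e. the DMA constraint described above. The helper name and its
 * placement here are hypothetical.
 */
#if 0
static inline int rr_crosses_64k(void *data, unsigned int len)
{
	unsigned long start = virt_to_bus(data);

	/* The buffer violates the constraint if its first and last
	 * bytes fall into different 64KB-aligned chunks. */
	return (start & ~0xffffUL) != ((start + len - 1) & ~0xffffUL);
}
#endif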
104
105 static char version[] __initdata = "rrunner.c: v0.22 03/01/2000 Jes Sorensen (Jes.Sorensen@cern.ch)\n";
106
107 static struct net_device *root_dev;
108
109
110 /*
111 * These are checked at init time to see if they are at least 256KB
112 * and increased to 256KB if they are not. This is done to avoid ending
 * up with socket buffers smaller than the MTU size.
114 */
115 extern __u32 sysctl_wmem_max;
116 extern __u32 sysctl_rmem_max;
117
118 static int probed __initdata = 0;
119
120 #if LINUX_VERSION_CODE >= 0x20400
121 static struct pci_device_id rrunner_pci_tbl[] __initdata = {
122 { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER, PCI_ANY_ID, PCI_ANY_ID, },
123 { } /* Terminating entry */
124 };
125 MODULE_DEVICE_TABLE(pci, rrunner_pci_tbl);
126 #endif /* LINUX_VERSION_CODE >= 0x20400 */
127
128 #ifdef NEW_NETINIT
int __init rr_hippi_probe (void)
130 #else
131 int __init rr_hippi_probe (struct net_device *dev)
132 #endif
133 {
134 #ifdef NEW_NETINIT
135 struct net_device *dev;
136 #endif
137 int boards_found = 0;
138 int version_disp; /* was version info already displayed? */
139 struct pci_dev *pdev = NULL;
140 struct pci_dev *opdev = NULL;
141 u8 pci_latency;
142 struct rr_private *rrpriv;
143
144 if (probed)
145 return -ENODEV;
146 probed++;
147
148 version_disp = 0;
149
150 while((pdev = pci_find_device(PCI_VENDOR_ID_ESSENTIAL,
151 PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
152 pdev)))
153 {
154 if (pci_enable_device(pdev))
155 continue;
156
157 if (pdev == opdev)
158 return 0;
159
160 /*
161 * So we found our HIPPI ... time to tell the system.
162 */
163
164 dev = init_hippi_dev(NULL, sizeof(struct rr_private));
165
166 if (!dev)
167 break;
168
169 if (!dev->priv)
170 dev->priv = kmalloc(sizeof(*rrpriv), GFP_KERNEL);
171
172 if (!dev->priv)
173 return -ENOMEM;
174
175 rrpriv = (struct rr_private *)dev->priv;
176 memset(rrpriv, 0, sizeof(*rrpriv));
177
178 #ifdef CONFIG_SMP
179 spin_lock_init(&rrpriv->lock);
180 #endif
181 sprintf(rrpriv->name, "RoadRunner serial HIPPI");
182
183 dev->irq = pdev->irq;
184 SET_MODULE_OWNER(dev);
185 dev->open = &rr_open;
186 dev->hard_start_xmit = &rr_start_xmit;
187 dev->stop = &rr_close;
188 dev->get_stats = &rr_get_stats;
189 dev->do_ioctl = &rr_ioctl;
190
191 #if (LINUX_VERSION_CODE < 0x02030d)
192 dev->base_addr = pdev->base_address[0];
193 #else
194 dev->base_addr = pdev->resource[0].start;
195 #endif
196
197 /* display version info if adapter is found */
198 if (!version_disp)
199 {
200 /* set display flag to TRUE so that */
201 /* we only display this string ONCE */
202 version_disp = 1;
203 printk(version);
204 }
205
206 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
207 if (pci_latency <= 0x58){
208 pci_latency = 0x58;
209 pci_write_config_byte(pdev, PCI_LATENCY_TIMER,
210 pci_latency);
211 }
212
213 pci_set_master(pdev);
214
215 printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
216 "at 0x%08lx, irq %i, PCI latency %i\n", dev->name,
217 dev->base_addr, dev->irq, pci_latency);
218
219 /*
220 * Remap the regs into kernel space.
221 */
222
223 rrpriv->regs = (struct rr_regs *)
224 ioremap(dev->base_addr, 0x1000);
225
226 if (!rrpriv->regs){
227 printk(KERN_ERR "%s: Unable to map I/O register, "
228 "RoadRunner %i will be disabled.\n",
229 dev->name, boards_found);
230 break;
231 }
232
233 /*
 * Don't access any registers before this point!
235 */
236 #ifdef __BIG_ENDIAN
237 writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP, &rrpriv->regs->HostCtrl);
238 #endif
239 /*
240 * Need to add a case for little-endian 64-bit hosts here.
241 */
242
243 rr_init(dev);
244
245 boards_found++;
246 dev->base_addr = 0;
247 dev = NULL;
248 opdev = pdev;
249 }
250
251 /*
252 * If we're at this point we're going through rr_hippi_probe()
253 * for the first time. Return success (0) if we've initialized
254 * 1 or more boards. Otherwise, return failure (-ENODEV).
255 */
256
257 #ifdef MODULE
258 return boards_found;
259 #else
260 if (boards_found > 0)
261 return 0;
262 else
263 return -ENODEV;
264 #endif
265 }
266
267
268 #ifdef MODULE
269 #if LINUX_VERSION_CODE > 0x20118
270 MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@cern.ch>");
271 MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
272 MODULE_LICENSE("GPL");
273 #endif
274
int init_module(void)
276 {
277 int cards;
278
279 root_dev = NULL;
280
281 #ifdef NEW_NETINIT
282 cards = rr_hippi_probe();
283 #else
284 cards = rr_hippi_probe(NULL);
285 #endif
286 return cards ? 0 : -ENODEV;
287 }
288
void cleanup_module(void)
290 {
291 struct rr_private *rr;
292 struct net_device *next;
293
294 while (root_dev) {
295 next = ((struct rr_private *)root_dev->priv)->next;
296 rr = (struct rr_private *)root_dev->priv;
297
298 if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)){
299 printk(KERN_ERR "%s: trying to unload running NIC\n",
300 root_dev->name);
301 writel(HALT_NIC, &rr->regs->HostCtrl);
302 }
303
304 iounmap(rr->regs);
305 unregister_hipdev(root_dev);
306 kfree(root_dev);
307
308 root_dev = next;
309 }
310 }
311 #endif
312
313
314 /*
315 * Commands are considered to be slow, thus there is no reason to
316 * inline this.
317 */
static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd)
319 {
320 struct rr_regs *regs;
321 u32 idx;
322
323 regs = rrpriv->regs;
324 /*
325 * This is temporary - it will go away in the final version.
326 * We probably also want to make this function inline.
327 */
	if (readl(&regs->HostCtrl) & NIC_HALTED){
		printk("issuing command for halted NIC, code 0x%x, "
		       "HostCtrl %08x\n", cmd->code, readl(&regs->HostCtrl));
		if (readl(&regs->Mode) & FATAL_ERR)
			printk("error codes Fail1 %02x, Fail2 %02x\n",
			       readl(&regs->Fail1), readl(&regs->Fail2));
	}

	idx = rrpriv->info->cmd_ctrl.pi;

	writel(*(u32*)(cmd), &regs->CmdRing[idx]);
	wmb();

	idx = (idx - 1) % CMD_RING_ENTRIES;
	rrpriv->info->cmd_ctrl.pi = idx;
	wmb();

	if (readl(&regs->Mode) & FATAL_ERR)
		printk("error code %02x\n", readl(&regs->Fail1));
347 }
348
349
350 /*
351 * Reset the board in a sensible manner. The NIC is already halted
352 * when we get here and a spin-lock is held.
353 */
static int rr_reset(struct net_device *dev)
355 {
356 struct rr_private *rrpriv;
357 struct rr_regs *regs;
358 struct eeprom *hw = NULL;
359 u32 start_pc;
360 int i;
361
362 rrpriv = (struct rr_private *)dev->priv;
363 regs = rrpriv->regs;
364
365 rr_load_firmware(dev);
366
	writel(0x01000000, &regs->TX_state);
	writel(0xff800000, &regs->RX_state);
	writel(0, &regs->AssistState);
	writel(CLEAR_INTA, &regs->LocalCtrl);
	writel(0x01, &regs->BrkPt);
	writel(0, &regs->Timer);
	writel(0, &regs->TimerRef);
	writel(RESET_DMA, &regs->DmaReadState);
	writel(RESET_DMA, &regs->DmaWriteState);
	writel(0, &regs->DmaWriteHostHi);
	writel(0, &regs->DmaWriteHostLo);
	writel(0, &regs->DmaReadHostHi);
	writel(0, &regs->DmaReadHostLo);
	writel(0, &regs->DmaReadLen);
	writel(0, &regs->DmaWriteLen);
	writel(0, &regs->DmaWriteLcl);
	writel(0, &regs->DmaWriteIPchecksum);
	writel(0, &regs->DmaReadLcl);
	writel(0, &regs->DmaReadIPchecksum);
	writel(0, &regs->PciState);
#if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN
	writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, &regs->Mode);
#elif (BITS_PER_LONG == 64)
	writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, &regs->Mode);
#else
	writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, &regs->Mode);
#endif
394
395 #if 0
396 /*
397 * Don't worry, this is just black magic.
398 */
	writel(0xdf000, &regs->RxBase);
	writel(0xdf000, &regs->RxPrd);
	writel(0xdf000, &regs->RxCon);
	writel(0xce000, &regs->TxBase);
	writel(0xce000, &regs->TxPrd);
	writel(0xce000, &regs->TxCon);
	writel(0, &regs->RxIndPro);
	writel(0, &regs->RxIndCon);
	writel(0, &regs->RxIndRef);
	writel(0, &regs->TxIndPro);
	writel(0, &regs->TxIndCon);
	writel(0, &regs->TxIndRef);
	writel(0xcc000, &regs->pad10[0]);
	writel(0, &regs->DrCmndPro);
	writel(0, &regs->DrCmndCon);
	writel(0, &regs->DwCmndPro);
	writel(0, &regs->DwCmndCon);
	writel(0, &regs->DwCmndRef);
	writel(0, &regs->DrDataPro);
	writel(0, &regs->DrDataCon);
	writel(0, &regs->DrDataRef);
	writel(0, &regs->DwDataPro);
	writel(0, &regs->DwDataCon);
	writel(0, &regs->DwDataRef);
423 #endif
424
	writel(0xffffffff, &regs->MbEvent);
	writel(0, &regs->Event);

	writel(0, &regs->TxPi);
	writel(0, &regs->IpRxPi);

	writel(0, &regs->EvtCon);
	writel(0, &regs->EvtPrd);

	rrpriv->info->evt_ctrl.pi = 0;

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRing[i]);
438
	/*
	 * Why 32? Is this not cache-line-size dependent?
	 */
	writel(RBURST_64|WBURST_64, &regs->PciState);
443 wmb();
444
445 start_pc = rr_read_eeprom_word(rrpriv, &hw->rncd_info.FwStart);
446
447 #if (DEBUG > 1)
448 printk("%s: Executing firmware at address 0x%06x\n",
449 dev->name, start_pc);
450 #endif
451
	writel(start_pc + 0x800, &regs->Pc);
	wmb();
	udelay(5);

	writel(start_pc, &regs->Pc);
457 wmb();
458
459 return 0;
460 }
461
462
463 /*
464 * Read a string from the EEPROM.
465 */
static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
				   unsigned long offset,
				   unsigned char *buf,
				   unsigned long length)
470 {
471 struct rr_regs *regs = rrpriv->regs;
472 u32 misc, io, host, i;
473
	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	misc = readl(&regs->LocalCtrl);
	writel(0, &regs->LocalCtrl);
	host = readl(&regs->HostCtrl);
	writel(host | HALT_NIC, &regs->HostCtrl);
	mb();

	for (i = 0; i < length; i++){
		writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
		mb();
		buf[i] = (readl(&regs->WinData) >> 24) & 0xff;
		mb();
	}

	writel(host, &regs->HostCtrl);
	writel(misc, &regs->LocalCtrl);
	writel(io, &regs->ExtIo);
	mb();
	return i;
494 }
495
496
497 /*
498 * Shortcut to read one word (4 bytes) out of the EEPROM and convert
499 * it to our CPU byte-order.
500 */
static u32 rr_read_eeprom_word(struct rr_private *rrpriv,
			       void * offset)
503 {
504 u32 word;
505
506 if ((rr_read_eeprom(rrpriv, (unsigned long)offset,
507 (char *)&word, 4) == 4))
508 return be32_to_cpu(word);
509 return 0;
510 }
511
512
513 /*
514 * Write a string to the EEPROM.
515 *
516 * This is only called when the firmware is not running.
517 */
static unsigned int write_eeprom(struct rr_private *rrpriv,
				 unsigned long offset,
				 unsigned char *buf,
				 unsigned long length)
522 {
523 struct rr_regs *regs = rrpriv->regs;
524 u32 misc, io, data, i, j, ready, error = 0;
525
	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	misc = readl(&regs->LocalCtrl);
	writel(ENABLE_EEPROM_WRITE, &regs->LocalCtrl);
530 mb();
531
532 for (i = 0; i < length; i++){
		writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
534 mb();
535 data = buf[i] << 24;
536 /*
537 * Only try to write the data if it is not the same
538 * value already.
539 */
		if ((readl(&regs->WinData) & 0xff000000) != data){
			writel(data, &regs->WinData);
542 ready = 0;
543 j = 0;
544 mb();
545 while(!ready){
546 udelay(20);
				if ((readl(&regs->WinData) & 0xff000000) ==
				    data)
549 ready = 1;
550 mb();
551 if (j++ > 5000){
552 printk("data mismatch: %08x, "
553 "WinData %08x\n", data,
					       readl(&regs->WinData));
555 ready = 1;
556 error = 1;
557 }
558 }
559 }
560 }
561
	writel(misc, &regs->LocalCtrl);
	writel(io, &regs->ExtIo);
564 mb();
565
566 return error;
567 }
568
569
static int __init rr_init(struct net_device *dev)
571 {
572 struct rr_private *rrpriv;
573 struct rr_regs *regs;
574 struct eeprom *hw = NULL;
575 u32 sram_size, rev;
576 int i;
577
578 rrpriv = (struct rr_private *)dev->priv;
579 regs = rrpriv->regs;
580
	rev = readl(&regs->FwRev);
582 rrpriv->fw_rev = rev;
583 if (rev > 0x00020024)
584 printk(" Firmware revision: %i.%i.%i\n", (rev >> 16),
585 ((rev >> 8) & 0xff), (rev & 0xff));
586 else if (rev >= 0x00020000) {
587 printk(" Firmware revision: %i.%i.%i (2.0.37 or "
588 "later is recommended)\n", (rev >> 16),
589 ((rev >> 8) & 0xff), (rev & 0xff));
590 }else{
591 printk(" Firmware revision too old: %i.%i.%i, please "
592 "upgrade to 2.0.37 or later.\n",
593 (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff));
594 }
595
596 #if (DEBUG > 2)
597 printk(" Maximum receive rings %i\n", readl(®s->MaxRxRng));
598 #endif
599
600 /*
601 * Read the hardware address from the eeprom. The HW address
602 * is not really necessary for HIPPI but awfully convenient.
603 * The pointer arithmetic to put it in dev_addr is ugly, but
604 * Donald Becker does it this way for the GigE version of this
605 * card and it's shorter and more portable than any
606 * other method I've seen. -VAL
607 */
608
609 *(u16 *)(dev->dev_addr) =
610 htons(rr_read_eeprom_word(rrpriv, &hw->manf.BoardULA));
611 *(u32 *)(dev->dev_addr+2) =
612 htonl(rr_read_eeprom_word(rrpriv, &hw->manf.BoardULA[4]));
613
614 printk(" MAC: ");
615
616 for (i = 0; i < 5; i++)
617 printk("%2.2x:", dev->dev_addr[i]);
618 printk("%2.2x\n", dev->dev_addr[i]);
619
620 sram_size = rr_read_eeprom_word(rrpriv, (void *)8);
621 printk(" SRAM size 0x%06x\n", sram_size);
622
623 if (sysctl_rmem_max < 262144){
624 printk(" Receive socket buffer limit too low (%i), "
625 "setting to 262144\n", sysctl_rmem_max);
626 sysctl_rmem_max = 262144;
627 }
628
629 if (sysctl_wmem_max < 262144){
630 printk(" Transmit socket buffer limit too low (%i), "
631 "setting to 262144\n", sysctl_wmem_max);
632 sysctl_wmem_max = 262144;
633 }
634
635 rrpriv->next = root_dev;
636 root_dev = dev;
637
638 return 0;
639 }
640
641
static int rr_init1(struct net_device *dev)
643 {
644 struct rr_private *rrpriv;
645 struct rr_regs *regs;
646 unsigned long myjif, flags;
647 struct cmd cmd;
648 u32 hostctrl;
649 int ecode = 0;
650 short i;
651
652 rrpriv = (struct rr_private *)dev->priv;
653 regs = rrpriv->regs;
654
655 spin_lock_irqsave(&rrpriv->lock, flags);
656
	hostctrl = readl(&regs->HostCtrl);
	writel(hostctrl | HALT_NIC | RR_CLEAR_INT, &regs->HostCtrl);
659 wmb();
660
661 if (hostctrl & PARITY_ERR){
662 printk("%s: Parity error halting NIC - this is serious!\n",
663 dev->name);
664 spin_unlock_irqrestore(&rrpriv->lock, flags);
665 ecode = -EFAULT;
666 goto error;
667 }
668
669 set_rxaddr(regs, rrpriv->rx_ctrl);
670 set_infoaddr(regs, rrpriv->info);
671
672 rrpriv->info->evt_ctrl.entry_size = sizeof(struct event);
673 rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES;
674 rrpriv->info->evt_ctrl.mode = 0;
675 rrpriv->info->evt_ctrl.pi = 0;
676 set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring);
677
678 rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd);
679 rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES;
680 rrpriv->info->cmd_ctrl.mode = 0;
681 rrpriv->info->cmd_ctrl.pi = 15;
682
683 for (i = 0; i < CMD_RING_ENTRIES; i++) {
		writel(0, &regs->CmdRing[i]);
685 }
686
687 for (i = 0; i < TX_RING_ENTRIES; i++) {
688 rrpriv->tx_ring[i].size = 0;
689 set_rraddr(&rrpriv->tx_ring[i].addr, 0);
690 rrpriv->tx_skbuff[i] = 0;
691 }
692 rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc);
693 rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES;
694 rrpriv->info->tx_ctrl.mode = 0;
695 rrpriv->info->tx_ctrl.pi = 0;
696 set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring);
697
698 /*
699 * Set dirty_tx before we start receiving interrupts, otherwise
700 * the interrupt handler might think it is supposed to process
701 * tx ints before we are up and running, which may cause a null
702 * pointer access in the int handler.
703 */
704 rrpriv->tx_full = 0;
705 rrpriv->cur_rx = 0;
706 rrpriv->dirty_rx = rrpriv->dirty_tx = 0;
707
708 rr_reset(dev);
709
710 /* Tuning values */
	writel(0x5000, &regs->ConRetry);
	writel(0x100, &regs->ConRetryTmr);
	writel(0x500000, &regs->ConTmout);
	writel(0x60, &regs->IntrTmr);
	writel(0x500000, &regs->TxDataMvTimeout);
	writel(0x200000, &regs->RxDataMvTimeout);
	writel(0x80, &regs->WriteDmaThresh);
	writel(0x80, &regs->ReadDmaThresh);
719
720 rrpriv->fw_running = 0;
721 wmb();
722
723 hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR);
	writel(hostctrl, &regs->HostCtrl);
725 wmb();
726
727 spin_unlock_irqrestore(&rrpriv->lock, flags);
728
729 for (i = 0; i < RX_RING_ENTRIES; i++) {
730 struct sk_buff *skb;
731
732 rrpriv->rx_ring[i].mode = 0;
733 skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC);
734 if (!skb) {
735 printk(KERN_WARNING "%s: Unable to allocate memory "
736 "for receive ring - halting NIC\n", dev->name);
737 ecode = -ENOMEM;
738 goto error;
739 }
740 rrpriv->rx_skbuff[i] = skb;
741 /*
742 * Sanity test to see if we conflict with the DMA
743 * limitations of the Roadrunner.
744 */
745 if ((((unsigned long)skb->data) & 0xfff) > ~65320)
746 printk("skb alloc error\n");
747
748 set_rraddr(&rrpriv->rx_ring[i].addr, skb->data);
749 rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN;
750 }
751
752 rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc);
753 rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES;
754 rrpriv->rx_ctrl[4].mode = 8;
755 rrpriv->rx_ctrl[4].pi = 0;
756 wmb();
757 set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring);
758
759 udelay(1000);
760
761 /*
762 * Now start the FirmWare.
763 */
764 cmd.code = C_START_FW;
765 cmd.ring = 0;
766 cmd.index = 0;
767
768 rr_issue_cmd(rrpriv, &cmd);
769
770 /*
771 * Give the FirmWare time to chew on the `get running' command.
772 */
773 myjif = jiffies + 5 * HZ;
774 while (time_before(jiffies, myjif) && !rrpriv->fw_running);
775
776 netif_start_queue(dev);
777
778 return ecode;
779
780 error:
781 /*
782 * We might have gotten here because we are out of memory,
783 * make sure we release everything we allocated before failing
784 */
785 for (i = 0; i < RX_RING_ENTRIES; i++) {
786 if (rrpriv->rx_skbuff[i]) {
787 rrpriv->rx_ring[i].size = 0;
788 set_rraddr(&rrpriv->rx_ring[i].addr, 0);
789 dev_kfree_skb(rrpriv->rx_skbuff[i]);
790 }
791 }
792 return ecode;
793 }
794
795
796 /*
797 * All events are considered to be slow (RX/TX ints do not generate
798 * events) and are handled here, outside the main interrupt handler,
799 * to reduce the size of the handler.
800 */
static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx)
802 {
803 struct rr_private *rrpriv;
804 struct rr_regs *regs;
805 u32 tmp;
806
807 rrpriv = (struct rr_private *)dev->priv;
808 regs = rrpriv->regs;
809
810 while (prodidx != eidx){
811 switch (rrpriv->evt_ring[eidx].code){
812 case E_NIC_UP:
			tmp = readl(&regs->FwRev);
814 printk(KERN_INFO "%s: Firmware revision %i.%i.%i "
815 "up and running\n", dev->name,
816 (tmp >> 16), ((tmp >> 8) & 0xff), (tmp & 0xff));
817 rrpriv->fw_running = 1;
			writel(RX_RING_ENTRIES - 1, &regs->IpRxPi);
819 wmb();
820 break;
821 case E_LINK_ON:
822 printk(KERN_INFO "%s: Optical link ON\n", dev->name);
823 break;
824 case E_LINK_OFF:
825 printk(KERN_INFO "%s: Optical link OFF\n", dev->name);
826 break;
827 case E_RX_IDLE:
828 printk(KERN_WARNING "%s: RX data not moving\n",
829 dev->name);
830 goto drop;
831 case E_WATCHDOG:
832 printk(KERN_INFO "%s: The watchdog is here to see "
833 "us\n", dev->name);
834 break;
835 case E_INTERN_ERR:
836 printk(KERN_ERR "%s: HIPPI Internal NIC error\n",
837 dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
840 wmb();
841 break;
842 case E_HOST_ERR:
843 printk(KERN_ERR "%s: Host software error\n",
844 dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
847 wmb();
848 break;
849 /*
850 * TX events.
851 */
852 case E_CON_REJ:
853 printk(KERN_WARNING "%s: Connection rejected\n",
854 dev->name);
855 rrpriv->stats.tx_aborted_errors++;
856 break;
857 case E_CON_TMOUT:
858 printk(KERN_WARNING "%s: Connection timeout\n",
859 dev->name);
860 break;
861 case E_DISC_ERR:
862 printk(KERN_WARNING "%s: HIPPI disconnect error\n",
863 dev->name);
864 rrpriv->stats.tx_aborted_errors++;
865 break;
866 case E_INT_PRTY:
867 printk(KERN_ERR "%s: HIPPI Internal Parity error\n",
868 dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
871 wmb();
872 break;
873 case E_TX_IDLE:
874 printk(KERN_WARNING "%s: Transmitter idle\n",
875 dev->name);
876 break;
877 case E_TX_LINK_DROP:
878 printk(KERN_WARNING "%s: Link lost during transmit\n",
879 dev->name);
880 rrpriv->stats.tx_aborted_errors++;
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
883 wmb();
884 break;
885 case E_TX_INV_RNG:
886 printk(KERN_ERR "%s: Invalid send ring block\n",
887 dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
890 wmb();
891 break;
892 case E_TX_INV_BUF:
893 printk(KERN_ERR "%s: Invalid send buffer address\n",
894 dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
897 wmb();
898 break;
899 case E_TX_INV_DSC:
900 printk(KERN_ERR "%s: Invalid descriptor address\n",
901 dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
904 wmb();
905 break;
906 /*
907 * RX events.
908 */
909 case E_RX_RNG_OUT:
910 printk(KERN_INFO "%s: Receive ring full\n", dev->name);
911 break;
912
913 case E_RX_PAR_ERR:
914 printk(KERN_WARNING "%s: Receive parity error\n",
915 dev->name);
916 goto drop;
917 case E_RX_LLRC_ERR:
918 printk(KERN_WARNING "%s: Receive LLRC error\n",
919 dev->name);
920 goto drop;
921 case E_PKT_LN_ERR:
922 printk(KERN_WARNING "%s: Receive packet length "
923 "error\n", dev->name);
924 goto drop;
925 case E_DTA_CKSM_ERR:
926 printk(KERN_WARNING "%s: Data checksum error\n",
927 dev->name);
928 goto drop;
929 case E_SHT_BST:
930 printk(KERN_WARNING "%s: Unexpected short burst "
931 "error\n", dev->name);
932 goto drop;
933 case E_STATE_ERR:
934 printk(KERN_WARNING "%s: Recv. state transition"
935 " error\n", dev->name);
936 goto drop;
937 case E_UNEXP_DATA:
938 printk(KERN_WARNING "%s: Unexpected data error\n",
939 dev->name);
940 goto drop;
941 case E_LST_LNK_ERR:
942 printk(KERN_WARNING "%s: Link lost error\n",
943 dev->name);
944 goto drop;
945 case E_FRM_ERR:
946 printk(KERN_WARNING "%s: Framming Error\n",
947 dev->name);
948 goto drop;
949 case E_FLG_SYN_ERR:
950 printk(KERN_WARNING "%s: Flag sync. lost during"
951 "packet\n", dev->name);
952 goto drop;
953 case E_RX_INV_BUF:
954 printk(KERN_ERR "%s: Invalid receive buffer "
955 "address\n", dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
958 wmb();
959 break;
960 case E_RX_INV_DSC:
961 printk(KERN_ERR "%s: Invalid receive descriptor "
962 "address\n", dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
965 wmb();
966 break;
967 case E_RNG_BLK:
968 printk(KERN_ERR "%s: Invalid ring block\n",
969 dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
972 wmb();
973 break;
974 drop:
975 /* Label packet to be dropped.
976 * Actual dropping occurs in rx
977 * handling.
978 *
979 * The index of packet we get to drop is
980 * the index of the packet following
981 * the bad packet. -kbf
982 */
983 {
984 u16 index = rrpriv->evt_ring[eidx].index;
985 index = (index + (RX_RING_ENTRIES - 1)) %
986 RX_RING_ENTRIES;
987 rrpriv->rx_ring[index].mode |=
988 (PACKET_BAD | PACKET_END);
989 }
990 break;
991 default:
992 printk(KERN_WARNING "%s: Unhandled event 0x%02x\n",
993 dev->name, rrpriv->evt_ring[eidx].code);
994 }
995 eidx = (eidx + 1) % EVT_RING_ENTRIES;
996 }
997
998 rrpriv->info->evt_ctrl.pi = eidx;
999 wmb();
1000 return eidx;
1001 }
1002
1003
static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
1005 {
1006 struct rr_private *rrpriv = (struct rr_private *)dev->priv;
1007 struct rr_regs *regs = rrpriv->regs;
1008
1009 do {
1010 u32 pkt_len;
1011 pkt_len = rrpriv->rx_ring[index].size;
1012 #if (DEBUG > 2)
1013 printk("index %i, rxlimit %i\n", index, rxlimit);
1014 printk("len %x, mode %x\n", pkt_len,
1015 rrpriv->rx_ring[index].mode);
1016 #endif
1017 if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){
1018 rrpriv->stats.rx_dropped++;
1019 goto defer;
1020 }
1021
1022 if (pkt_len > 0){
1023 struct sk_buff *skb;
1024
1025 if (pkt_len < PKT_COPY_THRESHOLD) {
1026 skb = alloc_skb(pkt_len, GFP_ATOMIC);
1027 if (skb == NULL){
1028 printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len);
1029 rrpriv->stats.rx_dropped++;
1030 goto defer;
1031 }else
1032 memcpy(skb_put(skb, pkt_len),
1033 rrpriv->rx_skbuff[index]->data,
1034 pkt_len);
1035 }else{
1036 struct sk_buff *newskb;
1037
1038 newskb = alloc_skb(dev->mtu + HIPPI_HLEN,
1039 GFP_ATOMIC);
1040 if (newskb){
1041 skb = rrpriv->rx_skbuff[index];
1042 skb_put(skb, pkt_len);
1043 rrpriv->rx_skbuff[index] = newskb;
1044 set_rraddr(&rrpriv->rx_ring[index].addr, newskb->data);
1045 }else{
1046 printk("%s: Out of memory, deferring "
1047 "packet\n", dev->name);
1048 rrpriv->stats.rx_dropped++;
1049 goto defer;
1050 }
1051 }
1052 skb->dev = dev;
1053 skb->protocol = hippi_type_trans(skb, dev);
1054
1055 netif_rx(skb); /* send it up */
1056
1057 dev->last_rx = jiffies;
1058 rrpriv->stats.rx_packets++;
1059 rrpriv->stats.rx_bytes += pkt_len;
1060 }
1061 defer:
1062 rrpriv->rx_ring[index].mode = 0;
1063 rrpriv->rx_ring[index].size = dev->mtu + HIPPI_HLEN;
1064
1065 if ((index & 7) == 7)
			writel(index, &regs->IpRxPi);
1067
1068 index = (index + 1) % RX_RING_ENTRIES;
1069 } while(index != rxlimit);
1070
1071 rrpriv->cur_rx = index;
1072 wmb();
1073 }
1074
1075
static void rr_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
1077 {
1078 struct rr_private *rrpriv;
1079 struct rr_regs *regs;
1080 struct net_device *dev = (struct net_device *)dev_id;
1081 u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon;
1082
1083 rrpriv = (struct rr_private *)dev->priv;
1084 regs = rrpriv->regs;
1085
	if (!(readl(&regs->HostCtrl) & RR_INT))
1087 return;
1088
1089 spin_lock(&rrpriv->lock);
1090
	prodidx = readl(&regs->EvtPrd);
1092 txcsmr = (prodidx >> 8) & 0xff;
1093 rxlimit = (prodidx >> 16) & 0xff;
1094 prodidx &= 0xff;
1095
1096 #if (DEBUG > 2)
1097 printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name,
1098 prodidx, rrpriv->info->evt_ctrl.pi);
1099 #endif
1100 /*
1101 * Order here is important. We must handle events
1102 * before doing anything else in order to catch
1103 * such things as LLRC errors, etc -kbf
1104 */
1105
1106 eidx = rrpriv->info->evt_ctrl.pi;
1107 if (prodidx != eidx)
1108 eidx = rr_handle_event(dev, prodidx, eidx);
1109
1110 rxindex = rrpriv->cur_rx;
1111 if (rxindex != rxlimit)
1112 rx_int(dev, rxlimit, rxindex);
1113
1114 txcon = rrpriv->dirty_tx;
1115 if (txcsmr != txcon) {
1116 do {
			/* Due to occasional firmware TX producer/consumer
			 * out-of-sync errors we need to check the entry in
			 * the ring -kbf
			 */
1120 if(rrpriv->tx_skbuff[txcon]){
1121 rrpriv->stats.tx_packets++;
1122 rrpriv->stats.tx_bytes +=rrpriv->tx_skbuff[txcon]->len;
1123 dev_kfree_skb_irq(rrpriv->tx_skbuff[txcon]);
1124
1125 rrpriv->tx_skbuff[txcon] = NULL;
1126 rrpriv->tx_ring[txcon].size = 0;
1127 set_rraddr(&rrpriv->tx_ring[txcon].addr, 0);
1128 rrpriv->tx_ring[txcon].mode = 0;
1129 }
1130 txcon = (txcon + 1) % TX_RING_ENTRIES;
1131 } while (txcsmr != txcon);
1132 wmb();
1133
1134 rrpriv->dirty_tx = txcon;
1135 if (rrpriv->tx_full && rr_if_busy(dev) &&
1136 (((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES)
1137 != rrpriv->dirty_tx)){
1138 rrpriv->tx_full = 0;
1139 netif_wake_queue(dev);
1140 rr_mark_net_bh(NET_BH);
1141 }
1142 }
1143
1144 eidx |= ((txcsmr << 8) | (rxlimit << 16));
	writel(eidx, &regs->EvtCon);
1146 wmb();
1147
1148 spin_unlock(&rrpriv->lock);
1149 }
1150
1151
static void rr_timer(unsigned long data)
1153 {
1154 struct net_device *dev = (struct net_device *)data;
1155 struct rr_private *rrpriv = (struct rr_private *)dev->priv;
1156 struct rr_regs *regs = rrpriv->regs;
1157 unsigned long flags;
1158 int i;
1159
	if (readl(&regs->HostCtrl) & NIC_HALTED){
1161 printk("%s: Restarting nic\n", dev->name);
1162 memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));
1163 memset(rrpriv->info, 0, sizeof(struct rr_info));
1164 wmb();
1165 for (i = 0; i < TX_RING_ENTRIES; i++) {
1166 if (rrpriv->tx_skbuff[i]) {
1167 rrpriv->tx_ring[i].size = 0;
1168 set_rraddr(&rrpriv->tx_ring[i].addr, 0);
1169 dev_kfree_skb(rrpriv->tx_skbuff[i]);
1170 rrpriv->tx_skbuff[i] = NULL;
1171 }
1172 }
1173
1174 for (i = 0; i < RX_RING_ENTRIES; i++) {
1175 if (rrpriv->rx_skbuff[i]) {
1176 rrpriv->rx_ring[i].size = 0;
1177 set_rraddr(&rrpriv->rx_ring[i].addr, 0);
1178 dev_kfree_skb(rrpriv->rx_skbuff[i]);
1179 rrpriv->rx_skbuff[i] = NULL;
1180 }
1181 }
1182 if (rr_init1(dev)) {
1183 spin_lock_irqsave(&rrpriv->lock, flags);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
1186 spin_unlock_irqrestore(&rrpriv->lock, flags);
1187 }
1188 }
1189 rrpriv->timer.expires = RUN_AT(5*HZ);
1190 add_timer(&rrpriv->timer);
1191 }
1192
1193
static int rr_open(struct net_device *dev)
1195 {
1196 struct rr_private *rrpriv;
1197 struct rr_regs *regs;
1198 int ecode = 0;
1199 unsigned long flags;
1200
1201 rrpriv = (struct rr_private *)dev->priv;
1202 regs = rrpriv->regs;
1203
1204 if (rrpriv->fw_rev < 0x00020000) {
1205 printk(KERN_WARNING "%s: trying to configure device with "
1206 "obsolete firmware\n", dev->name);
1207 ecode = -EBUSY;
1208 goto error;
1209 }
1210
1211 rrpriv->rx_ctrl = kmalloc(256*sizeof(struct ring_ctrl), GFP_KERNEL);
1212 if (!rrpriv->rx_ctrl) {
1213 ecode = -ENOMEM;
1214 goto error;
1215 }
1216
1217 rrpriv->info = kmalloc(sizeof(struct rr_info), GFP_KERNEL);
1218 if (!rrpriv->info){
1219 ecode = -ENOMEM;
1220 goto error;
1221 }
1222 memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));
1223 memset(rrpriv->info, 0, sizeof(struct rr_info));
1224 wmb();
1225
1226 spin_lock_irqsave(&rrpriv->lock, flags);
	writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
1228 spin_unlock_irqrestore(&rrpriv->lock, flags);
1229
1230 if (request_irq(dev->irq, rr_interrupt, SA_SHIRQ, rrpriv->name, dev))
1231 {
1232 printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
1233 dev->name, dev->irq);
1234 ecode = -EAGAIN;
1235 goto error;
1236 }
1237
1238 if ((ecode = rr_init1(dev)))
1239 goto error;
1240
	/* Set the timer to check for link beat and perhaps switch
	   to an alternate media type. */
1243 init_timer(&rrpriv->timer);
1244 rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. watchdog */
1245 rrpriv->timer.data = (unsigned long)dev;
1246 rrpriv->timer.function = &rr_timer; /* timer handler */
1247 add_timer(&rrpriv->timer);
1248
1249 netif_start_queue(dev);
1250
1251 return ecode;
1252
1253 error:
1254 spin_lock_irqsave(&rrpriv->lock, flags);
	writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
1256 spin_unlock_irqrestore(&rrpriv->lock, flags);
1257
1258 if (rrpriv->info) {
1259 kfree(rrpriv->info);
1260 rrpriv->info = NULL;
1261 }
1262 if (rrpriv->rx_ctrl) {
1263 kfree(rrpriv->rx_ctrl);
1264 rrpriv->rx_ctrl = NULL;
1265 }
1266
1267 netif_stop_queue(dev);
1268 rr_if_down(dev);
1269
1270 return ecode;
1271 }
1272
1273
static void rr_dump(struct net_device *dev)
1275 {
1276 struct rr_private *rrpriv;
1277 struct rr_regs *regs;
1278 u32 index, cons;
1279 short i;
1280 int len;
1281
1282 rrpriv = (struct rr_private *)dev->priv;
1283 regs = rrpriv->regs;
1284
1285 printk("%s: dumping NIC TX rings\n", dev->name);
1286
1287 printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n",
1288 readl(®s->RxPrd), readl(®s->TxPrd),
1289 readl(®s->EvtPrd), readl(®s->TxPi),
1290 rrpriv->info->tx_ctrl.pi);
1291
1292 printk("Error code 0x%x\n", readl(®s->Fail1));
1293
1294 index = (((readl(®s->EvtPrd) >> 8) & 0xff ) - 1) % EVT_RING_ENTRIES;
1295 cons = rrpriv->dirty_tx;
1296 printk("TX ring index %i, TX consumer %i\n",
1297 index, cons);
1298
1299 if (rrpriv->tx_skbuff[index]){
1300 len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len);
1301 printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size);
1302 for (i = 0; i < len; i++){
1303 if (!(i & 7))
1304 printk("\n");
1305 printk("%02x ", (unsigned char) rrpriv->tx_skbuff[index]->data[i]);
1306 }
1307 printk("\n");
1308 }
1309
1310 if (rrpriv->tx_skbuff[cons]){
1311 len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
1312 printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len);
1313 printk("mode 0x%x, size 0x%x,\n phys %08x (virt %08lx), skbuff-addr %08lx, truesize 0x%x\n",
1314 rrpriv->tx_ring[cons].mode,
1315 rrpriv->tx_ring[cons].size,
1316 rrpriv->tx_ring[cons].addr.addrlo,
1317 (unsigned long)bus_to_virt(rrpriv->tx_ring[cons].addr.addrlo),
1318 (unsigned long)rrpriv->tx_skbuff[cons]->data,
1319 (unsigned int)rrpriv->tx_skbuff[cons]->truesize);
1320 for (i = 0; i < len; i++){
1321 if (!(i & 7))
1322 printk("\n");
1323 printk("%02x ", (unsigned char)rrpriv->tx_ring[cons].size);
1324 }
1325 printk("\n");
1326 }
1327
1328 printk("dumping TX ring info:\n");
1329 for (i = 0; i < TX_RING_ENTRIES; i++)
1330 printk("mode 0x%x, size 0x%x, phys-addr %08x\n",
1331 rrpriv->tx_ring[i].mode,
1332 rrpriv->tx_ring[i].size,
1333 rrpriv->tx_ring[i].addr.addrlo);
1334
1335 }
1336
1337
static int rr_close(struct net_device *dev)
1339 {
1340 struct rr_private *rrpriv;
1341 struct rr_regs *regs;
1342 u32 tmp;
1343 short i;
1344
1345 netif_stop_queue(dev);
1346 rr_if_down(dev);
1347
1348 rrpriv = (struct rr_private *)dev->priv;
1349 regs = rrpriv->regs;
1350
	/*
	 * Lock to make sure we are not cleaning up while another CPU
	 * is handling interrupts.
	 */
1355 spin_lock(&rrpriv->lock);
1356
	tmp = readl(&regs->HostCtrl);
1358 if (tmp & NIC_HALTED){
1359 printk("%s: NIC already halted\n", dev->name);
1360 rr_dump(dev);
1361 }else{
1362 tmp |= HALT_NIC | RR_CLEAR_INT;
		writel(tmp, &regs->HostCtrl);
1364 wmb();
1365 }
1366
1367 rrpriv->fw_running = 0;
1368
1369 del_timer(&rrpriv->timer);
1370
	writel(0, &regs->TxPi);
	writel(0, &regs->IpRxPi);

	writel(0, &regs->EvtCon);
	writel(0, &regs->EvtPrd);

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRing[i]);
1379
1380 rrpriv->info->tx_ctrl.entries = 0;
1381 rrpriv->info->cmd_ctrl.pi = 0;
1382 rrpriv->info->evt_ctrl.pi = 0;
1383 rrpriv->rx_ctrl[4].entries = 0;
1384
1385 for (i = 0; i < TX_RING_ENTRIES; i++) {
1386 if (rrpriv->tx_skbuff[i]) {
1387 rrpriv->tx_ring[i].size = 0;
1388 set_rraddr(&rrpriv->tx_ring[i].addr, 0);
1389 dev_kfree_skb(rrpriv->tx_skbuff[i]);
1390 rrpriv->tx_skbuff[i] = NULL;
1391 }
1392 }
1393
1394 for (i = 0; i < RX_RING_ENTRIES; i++) {
1395 if (rrpriv->rx_skbuff[i]) {
1396 rrpriv->rx_ring[i].size = 0;
1397 set_rraddr(&rrpriv->rx_ring[i].addr, 0);
1398 dev_kfree_skb(rrpriv->rx_skbuff[i]);
1399 rrpriv->rx_skbuff[i] = NULL;
1400 }
1401 }
1402
1403 if (rrpriv->rx_ctrl) {
1404 kfree(rrpriv->rx_ctrl);
1405 rrpriv->rx_ctrl = NULL;
1406 }
1407 if (rrpriv->info) {
1408 kfree(rrpriv->info);
1409 rrpriv->info = NULL;
1410 }
1411
1412 free_irq(dev->irq, dev);
1413 spin_unlock(&rrpriv->lock);
1414
1415 return 0;
1416 }
1417
1418
static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev)
1420 {
1421 struct rr_private *rrpriv = (struct rr_private *)dev->priv;
1422 struct rr_regs *regs = rrpriv->regs;
1423 struct ring_ctrl *txctrl;
1424 unsigned long flags;
1425 u32 index, len = skb->len;
1426 u32 *ifield;
1427 struct sk_buff *new_skb;
1428
	if (readl(&regs->Mode) & FATAL_ERR)
		printk("error codes Fail1 %02x, Fail2 %02x\n",
		       readl(&regs->Fail1), readl(&regs->Fail2));
1432
1433 /*
1434 * We probably need to deal with tbusy here to prevent overruns.
1435 */
1436
1437 if (skb_headroom(skb) < 8){
1438 printk("incoming skb too small - reallocating\n");
1439 if (!(new_skb = dev_alloc_skb(len + 8))) {
1440 dev_kfree_skb(skb);
1441 netif_wake_queue(dev);
1442 return -EBUSY;
1443 }
1444 skb_reserve(new_skb, 8);
1445 skb_put(new_skb, len);
1446 memcpy(new_skb->data, skb->data, len);
1447 dev_kfree_skb(skb);
1448 skb = new_skb;
1449 }
1450
1451 ifield = (u32 *)skb_push(skb, 8);
1452
1453 ifield[0] = 0;
1454 ifield[1] = skb->private.ifield;
1455
1456 /*
1457 * We don't need the lock before we are actually going to start
1458 * fiddling with the control blocks.
1459 */
1460 spin_lock_irqsave(&rrpriv->lock, flags);
1461
1462 txctrl = &rrpriv->info->tx_ctrl;
1463
1464 index = txctrl->pi;
1465
1466 rrpriv->tx_skbuff[index] = skb;
1467 set_rraddr(&rrpriv->tx_ring[index].addr, skb->data);
1468 rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
1469 rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
1470 txctrl->pi = (index + 1) % TX_RING_ENTRIES;
1471 wmb();
	writel(txctrl->pi, &regs->TxPi);
1473
1474 if (txctrl->pi == rrpriv->dirty_tx){
1475 rrpriv->tx_full = 1;
1476 netif_stop_queue(dev);
1477 }
1478
1479 spin_unlock_irqrestore(&rrpriv->lock, flags);
1480
1481 dev->trans_start = jiffies;
1482 return 0;
1483 }
1484
1485
static struct net_device_stats *rr_get_stats(struct net_device *dev)
1487 {
1488 struct rr_private *rrpriv;
1489
1490 rrpriv = (struct rr_private *)dev->priv;
1491
1492 return(&rrpriv->stats);
1493 }
1494
1495
1496 /*
1497 * Read the firmware out of the EEPROM and put it into the SRAM
1498 * (or from user space - later)
1499 *
1500 * This operation requires the NIC to be halted and is performed with
 * interrupts disabled and with the spinlock held.
1502 */
static int rr_load_firmware(struct net_device *dev)
1504 {
1505 struct rr_private *rrpriv;
1506 struct rr_regs *regs;
1507 unsigned long eptr, segptr;
1508 int i, j;
1509 u32 localctrl, sptr, len, tmp;
1510 u32 p2len, p2size, nr_seg, revision, io, sram_size;
1511 struct eeprom *hw = NULL;
1512
1513 rrpriv = (struct rr_private *)dev->priv;
1514 regs = rrpriv->regs;
1515
1516 if (dev->flags & IFF_UP)
1517 return -EBUSY;
1518
	if (!(readl(&regs->HostCtrl) & NIC_HALTED)){
1520 printk("%s: Trying to load firmware to a running NIC.\n",
1521 dev->name);
1522 return -EBUSY;
1523 }
1524
	localctrl = readl(&regs->LocalCtrl);
	writel(0, &regs->LocalCtrl);

	writel(0, &regs->EvtPrd);
	writel(0, &regs->RxPrd);
	writel(0, &regs->TxPrd);
1531
1532 /*
1533 * First wipe the entire SRAM, otherwise we might run into all
1534 * kinds of trouble ... sigh, this took almost all afternoon
1535 * to track down ;-(
1536 */
	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	sram_size = rr_read_eeprom_word(rrpriv, (void *)8);

	for (i = 200; i < sram_size / 4; i++){
		writel(i * 4, &regs->WinBase);
		mb();
		writel(0, &regs->WinData);
		mb();
	}
	writel(io, &regs->ExtIo);
1548 mb();
1549
1550 eptr = (unsigned long)rr_read_eeprom_word(rrpriv,
1551 &hw->rncd_info.AddrRunCodeSegs);
1552 eptr = ((eptr & 0x1fffff) >> 3);
1553
1554 p2len = rr_read_eeprom_word(rrpriv, (void *)(0x83*4));
1555 p2len = (p2len << 2);
1556 p2size = rr_read_eeprom_word(rrpriv, (void *)(0x84*4));
1557 p2size = ((p2size & 0x1fffff) >> 3);
1558
1559 if ((eptr < p2size) || (eptr > (p2size + p2len))){
1560 printk("%s: eptr is invalid\n", dev->name);
1561 goto out;
1562 }
1563
1564 revision = rr_read_eeprom_word(rrpriv, &hw->manf.HeaderFmt);
1565
1566 if (revision != 1){
1567 printk("%s: invalid firmware format (%i)\n",
1568 dev->name, revision);
1569 goto out;
1570 }
1571
1572 nr_seg = rr_read_eeprom_word(rrpriv, (void *)eptr);
	eptr += 4;
1574 #if (DEBUG > 1)
1575 printk("%s: nr_seg %i\n", dev->name, nr_seg);
1576 #endif
1577
1578 for (i = 0; i < nr_seg; i++){
1579 sptr = rr_read_eeprom_word(rrpriv, (void *)eptr);
1580 eptr += 4;
1581 len = rr_read_eeprom_word(rrpriv, (void *)eptr);
1582 eptr += 4;
1583 segptr = (unsigned long)rr_read_eeprom_word(rrpriv, (void *)eptr);
1584 segptr = ((segptr & 0x1fffff) >> 3);
1585 eptr += 4;
1586 #if (DEBUG > 1)
1587 printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n",
1588 dev->name, i, sptr, len, segptr);
1589 #endif
1590 for (j = 0; j < len; j++){
1591 tmp = rr_read_eeprom_word(rrpriv, (void *)segptr);
			writel(sptr, &regs->WinBase);
			mb();
			writel(tmp, &regs->WinData);
1595 mb();
1596 segptr += 4;
1597 sptr += 4;
1598 }
1599 }
1600
1601 out:
	writel(localctrl, &regs->LocalCtrl);
1603 mb();
1604 return 0;
1605 }
1606
1607
static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1609 {
1610 struct rr_private *rrpriv;
1611 unsigned char *image, *oldimage;
1612 unsigned int i;
1613 int error = -EOPNOTSUPP;
1614
1615 rrpriv = dev->priv;
1616
1617 switch(cmd){
1618 case SIOCRRGFW:
1619 if (!capable(CAP_SYS_RAWIO)){
1620 return -EPERM;
1621 }
1622
1623 image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
1624 if (!image){
1625 printk(KERN_ERR "%s: Unable to allocate memory "
1626 "for EEPROM image\n", dev->name);
1627 return -ENOMEM;
1628 }
1629
1630 spin_lock(&rrpriv->lock);
1631
1632 if (rrpriv->fw_running){
1633 printk("%s: Firmware already running\n", dev->name);
1634 error = -EPERM;
1635 goto out_spin;
1636 }
1637
1638 i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES);
1639 if (i != EEPROM_BYTES){
1640 printk(KERN_ERR "%s: Error reading EEPROM\n", dev->name);
1641 error = -EFAULT;
1642 goto out_spin;
1643 }
1644 spin_unlock(&rrpriv->lock);
1645 error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES);
1646 if (error)
1647 error = -EFAULT;
1648 kfree(image);
1649 return error;
1650
1651 case SIOCRRPFW:
1652 if (!capable(CAP_SYS_RAWIO)){
1653 return -EPERM;
1654 }
1655
1656 image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
1657 if (!image){
1658 printk(KERN_ERR "%s: Unable to allocate memory "
1659 "for EEPROM image\n", dev->name);
1660 return -ENOMEM;
1661 }
1662
1663 oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
1664 if (!oldimage){
1665 kfree(image);
1666 printk(KERN_ERR "%s: Unable to allocate memory "
1667 "for old EEPROM image\n", dev->name);
1668 return -ENOMEM;
1669 }
1670
1671 error = copy_from_user(image, rq->ifr_data, EEPROM_BYTES);
1672 if (error) {
1673 kfree(image);
1674 kfree(oldimage);
1675 return -EFAULT;
1676 }
1677
1678 spin_lock(&rrpriv->lock);
1679 if (rrpriv->fw_running){
1680 kfree(oldimage);
1681 printk("%s: Firmware already running\n", dev->name);
1682 error = -EPERM;
1683 goto out_spin;
1684 }
1685
1686 printk("%s: Updating EEPROM firmware\n", dev->name);
1687
1688 error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES);
1689 if (error)
1690 printk(KERN_ERR "%s: Error writing EEPROM\n",
1691 dev->name);
1692
1693 i = rr_read_eeprom(rrpriv, 0, oldimage, EEPROM_BYTES);
1694 if (i != EEPROM_BYTES)
1695 printk(KERN_ERR "%s: Error reading back EEPROM "
1696 "image\n", dev->name);
1697
1698 spin_unlock(&rrpriv->lock);
1699 error = memcmp(image, oldimage, EEPROM_BYTES);
1700 if (error){
1701 printk(KERN_ERR "%s: Error verifying EEPROM image\n",
1702 dev->name);
1703 error = -EFAULT;
1704 }
1705 kfree(image);
1706 kfree(oldimage);
1707 return error;
1708
1709 case SIOCRRID:
1710 return put_user(0x52523032, (int *)(&rq->ifr_data[0]));
1711 default:
1712 return error;
1713 }
1714
1715 out_spin:
1716 kfree(image);
1717 spin_unlock(&rrpriv->lock);
1718 return error;
1719 }
1720
1721
1722 /*
1723 * Local variables:
1724 * compile-command: "gcc -D__KERNEL__ -I../../include -Wall -Wstrict-prototypes -O2 -pipe -fomit-frame-pointer -fno-strength-reduce -m486 -malign-loops=2 -malign-jumps=2 -malign-functions=2 -DMODULE -DMODVERSIONS -include ../../include/linux/modversions.h -c rrunner.c"
1725 * End:
1726 */
1727