1 /******************************************************************************
2 *
3 * nicstar.c
4 *
5 * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards.
6 *
7 * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME.
8 * It was taken from the frle-0.22 device driver.
9 * As the file doesn't have a copyright notice, in the file
10 * nicstarmac.copyright I put the copyright notice from the
11 * frle-0.22 device driver.
12 * Some code is based on the nicstar driver by M. Welsh.
13 *
14 * Author: Rui Prior (rprior@inescn.pt)
15 * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999
16 *
17 *
18 * (C) INESC 1999
19 *
20 *
21 ******************************************************************************/
22
23
24 /**** IMPORTANT INFORMATION ***************************************************
25 *
26 * There are currently three types of spinlocks:
27 *
28 * 1 - Per card interrupt spinlock (to protect structures and such)
29 * 2 - Per SCQ scq spinlock
30 * 3 - Per card resource spinlock (to access registers, etc.)
31 *
32 * These must NEVER be grabbed in reverse order (a correct nesting is sketched below).
33 *
34 ******************************************************************************/
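/* Illustrative sketch (not part of the original driver): when more than one
 * of the above locks is needed, they are nested in the 1 -> 2 -> 3 order,
 * e.g.
 *
 *    ns_grab_int_lock(card, flags);
 *    ns_grab_scq_lock(card, scq, scq_flags);
 *    ...
 *    spin_unlock_irqrestore(&scq->lock, scq_flags);
 *    spin_unlock_irqrestore(&card->int_lock, flags);
 *
 * Taking the resource lock (3) first and then the interrupt or SCQ lock
 * would be a reverse-order grab; the NS_DEBUG_SPINLOCKS macros below warn
 * about exactly that case. "scq_flags" is just a second flags variable for
 * the inner irqsave and is not a name used elsewhere in this driver.
 */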
35
36 /* Header files ***************************************************************/
37
38 #include <linux/module.h>
39 #include <linux/config.h>
40 #include <linux/kernel.h>
41 #include <linux/skbuff.h>
42 #include <linux/atmdev.h>
43 #include <linux/atm.h>
44 #include <linux/pci.h>
45 #include <linux/types.h>
46 #include <linux/string.h>
47 #include <linux/delay.h>
48 #include <linux/init.h>
49 #include <linux/sched.h>
50 #include <linux/timer.h>
51 #include <linux/interrupt.h>
52 #include <linux/bitops.h>
53 #include <asm/io.h>
54 #include <asm/uaccess.h>
55 #include <asm/atomic.h>
56 #include "nicstar.h"
57 #include "nicstarmac.h"
58 #ifdef CONFIG_ATM_NICSTAR_USE_SUNI
59 #include "suni.h"
60 #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
61 #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
62 #include "idt77105.h"
63 #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
64
65 #if BITS_PER_LONG != 32
66 # error FIXME: this driver requires a 32-bit platform
67 #endif
68
69 /* Additional code ************************************************************/
70
71 #include "nicstarmac.c"
72
73
74 /* Configurable parameters ****************************************************/
75
76 #undef PHY_LOOPBACK
77 #undef TX_DEBUG
78 #undef RX_DEBUG
79 #undef GENERAL_DEBUG
80 #undef EXTRA_DEBUG
81
82 #undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know
83 you're going to use only raw ATM */
84
85
86 /* Do not touch these *********************************************************/
87
88 #ifdef TX_DEBUG
89 #define TXPRINTK(args...) printk(args)
90 #else
91 #define TXPRINTK(args...)
92 #endif /* TX_DEBUG */
93
94 #ifdef RX_DEBUG
95 #define RXPRINTK(args...) printk(args)
96 #else
97 #define RXPRINTK(args...)
98 #endif /* RX_DEBUG */
99
100 #ifdef GENERAL_DEBUG
101 #define PRINTK(args...) printk(args)
102 #else
103 #define PRINTK(args...)
104 #endif /* GENERAL_DEBUG */
105
106 #ifdef EXTRA_DEBUG
107 #define XPRINTK(args...) printk(args)
108 #else
109 #define XPRINTK(args...)
110 #endif /* EXTRA_DEBUG */
111
112
113 /* Macros *********************************************************************/
114
115 #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ)
116
117 #define NS_DELAY mdelay(1)
118
119 #define ALIGN_BUS_ADDR(addr, alignment) \
120 ((((u32) (addr)) + (((u32) (alignment)) - 1)) & ~(((u32) (alignment)) - 1))
121 #define ALIGN_ADDRESS(addr, alignment) \
122 bus_to_virt(ALIGN_BUS_ADDR(virt_to_bus(addr), alignment))
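/* Example (added for illustration, not in the original source): with an
   8-byte alignment, ALIGN_BUS_ADDR(0x1005, 8) == 0x1008, i.e. the bus
   address is rounded up to the next multiple of the (power-of-two)
   alignment; ALIGN_ADDRESS() does the same for a virtual address by going
   through its bus address. */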
123
124 #undef CEIL
125
126 #ifndef ATM_SKB
127 #define ATM_SKB(s) (&(s)->atm)
128 #endif
129
130 /* Spinlock debugging stuff */
131 #ifdef NS_DEBUG_SPINLOCKS /* See nicstar.h */
132 #define ns_grab_int_lock(card,flags) \
133 do { \
134 unsigned long nsdsf, nsdsf2; \
135 local_irq_save(flags); \
136 save_flags(nsdsf); cli();\
137 if (nsdsf & (1<<9)) printk ("nicstar.c: ints %sabled -> enabled.\n", \
138 (flags)&(1<<9)?"en":"dis"); \
139 if (spin_is_locked(&(card)->int_lock) && \
140 (card)->cpu_int == smp_processor_id()) { \
141 printk("nicstar.c: line %d (cpu %d) int_lock already locked at line %d (cpu %d)\n", \
142 __LINE__, smp_processor_id(), (card)->has_int_lock, \
143 (card)->cpu_int); \
144 printk("nicstar.c: ints were %sabled.\n", ((flags)&(1<<9)?"en":"dis")); \
145 } \
146 if (spin_is_locked(&(card)->res_lock) && \
147 (card)->cpu_res == smp_processor_id()) { \
148 printk("nicstar.c: line %d (cpu %d) res_lock locked at line %d (cpu %d)(trying int)\n", \
149 __LINE__, smp_processor_id(), (card)->has_res_lock, \
150 (card)->cpu_res); \
151 printk("nicstar.c: ints were %sabled.\n", ((flags)&(1<<9)?"en":"dis")); \
152 } \
153 spin_lock_irq(&(card)->int_lock); \
154 (card)->has_int_lock = __LINE__; \
155 (card)->cpu_int = smp_processor_id(); \
156 restore_flags(nsdsf); } while (0)
157 #define ns_grab_res_lock(card,flags) \
158 do { \
159 unsigned long nsdsf, nsdsf2; \
160 local_irq_save(flags); \
161 save_flags(nsdsf); cli();\
162 if (nsdsf & (1<<9)) printk ("nicstar.c: ints %sabled -> enabled.\n", \
163 (flags)&(1<<9)?"en":"dis"); \
164 if (spin_is_locked(&(card)->res_lock) && \
165 (card)->cpu_res == smp_processor_id()) { \
166 printk("nicstar.c: line %d (cpu %d) res_lock already locked at line %d (cpu %d)\n", \
167 __LINE__, smp_processor_id(), (card)->has_res_lock, \
168 (card)->cpu_res); \
169 printk("nicstar.c: ints were %sabled.\n", ((flags)&(1<<9)?"en":"dis")); \
170 } \
171 spin_lock_irq(&(card)->res_lock); \
172 (card)->has_res_lock = __LINE__; \
173 (card)->cpu_res = smp_processor_id(); \
174 restore_flags(nsdsf); } while (0)
175 #define ns_grab_scq_lock(card,scq,flags) \
176 do { \
177 unsigned long nsdsf, nsdsf2; \
178 local_irq_save(flags); \
179 save_flags(nsdsf); cli();\
180 if (nsdsf & (1<<9)) printk ("nicstar.c: ints %sabled -> enabled.\n", \
181 (flags)&(1<<9)?"en":"dis"); \
182 if (spin_is_locked(&(scq)->lock) && \
183 (scq)->cpu_lock == smp_processor_id()) { \
184 printk("nicstar.c: line %d (cpu %d) this scq_lock already locked at line %d (cpu %d)\n", \
185 __LINE__, smp_processor_id(), (scq)->has_lock, \
186 (scq)->cpu_lock); \
187 printk("nicstar.c: ints were %sabled.\n", ((flags)&(1<<9)?"en":"dis")); \
188 } \
189 if (spin_is_locked(&(card)->res_lock) && \
190 (card)->cpu_res == smp_processor_id()) { \
191 printk("nicstar.c: line %d (cpu %d) res_lock locked at line %d (cpu %d)(trying scq)\n", \
192 __LINE__, smp_processor_id(), (card)->has_res_lock, \
193 (card)->cpu_res); \
194 printk("nicstar.c: ints were %sabled.\n", ((flags)&(1<<9)?"en":"dis")); \
195 } \
196 spin_lock_irq(&(scq)->lock); \
197 (scq)->has_lock = __LINE__; \
198 (scq)->cpu_lock = smp_processor_id(); \
199 restore_flags(nsdsf); } while (0)
200 #else /* !NS_DEBUG_SPINLOCKS */
201 #define ns_grab_int_lock(card,flags) \
202 spin_lock_irqsave(&(card)->int_lock,(flags))
203 #define ns_grab_res_lock(card,flags) \
204 spin_lock_irqsave(&(card)->res_lock,(flags))
205 #define ns_grab_scq_lock(card,scq,flags) \
206 spin_lock_irqsave(&(scq)->lock,flags)
207 #endif /* NS_DEBUG_SPINLOCKS */
208
209
210 /* Function declarations ******************************************************/
211
212 static u32 ns_read_sram(ns_dev *card, u32 sram_address);
213 static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count);
214 static int __devinit ns_init_card(int i, struct pci_dev *pcidev);
215 static void __devinit ns_init_card_error(ns_dev *card, int error);
216 static scq_info *get_scq(int size, u32 scd);
217 static void free_scq(scq_info *scq, struct atm_vcc *vcc);
218 static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
219 u32 handle2, u32 addr2);
220 static void ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
221 static int ns_open(struct atm_vcc *vcc, short vpi, int vci);
222 static void ns_close(struct atm_vcc *vcc);
223 static void fill_tst(ns_dev *card, int n, vc_map *vc);
224 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
225 static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd,
226 struct sk_buff *skb);
227 static void process_tsq(ns_dev *card);
228 static void drain_scq(ns_dev *card, scq_info *scq, int pos);
229 static void process_rsq(ns_dev *card);
230 static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe);
231 #ifdef NS_USE_DESTRUCTORS
232 static void ns_sb_destructor(struct sk_buff *sb);
233 static void ns_lb_destructor(struct sk_buff *lb);
234 static void ns_hb_destructor(struct sk_buff *hb);
235 #endif /* NS_USE_DESTRUCTORS */
236 static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb);
237 static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count);
238 static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb);
239 static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb);
240 static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb);
241 static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
242 static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg);
243 static void which_list(ns_dev *card, struct sk_buff *skb);
244 static void ns_poll(unsigned long arg);
245 static int ns_parse_mac(char *mac, unsigned char *esi);
246 static short ns_h2i(char c);
247 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
248 unsigned long addr);
249 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
250
251
252
253 /* Global variables ***********************************************************/
254
255 static struct ns_dev *cards[NS_MAX_CARDS];
256 static unsigned num_cards;
257 static struct atmdev_ops atm_ops =
258 {
259    .open      = ns_open,
260    .close     = ns_close,
261    .ioctl     = ns_ioctl,
262    .send      = ns_send,
263    .phy_put   = ns_phy_put,
264    .phy_get   = ns_phy_get,
265    .proc_read = ns_proc_read,
266    .owner     = THIS_MODULE,
267 };
268 static struct timer_list ns_timer;
269 static char *mac[NS_MAX_CARDS];
270 MODULE_PARM(mac, "1-" __MODULE_STRING(NS_MAX_CARDS) "s");
271 MODULE_LICENSE("GPL");
272
273
274 /* Functions ******************************************************************/
275
276 static int __devinit nicstar_init_one(struct pci_dev *pcidev,
277 const struct pci_device_id *ent)
278 {
279 static int index = -1;
280 unsigned int error;
281
282 index++;
283 cards[index] = NULL;
284
285 error = ns_init_card(index, pcidev);
286 if (error) {
287 cards[index--] = NULL; /* undo the increment above so this slot can be reused */
288 goto err_out;
289 }
290
291 return 0;
292 err_out:
293 return -ENODEV;
294 }
295
296
297
298 static void __devexit nicstar_remove_one(struct pci_dev *pcidev)
299 {
300 int i, j;
301 ns_dev *card = pci_get_drvdata(pcidev);
302 struct sk_buff *hb;
303 struct sk_buff *iovb;
304 struct sk_buff *lb;
305 struct sk_buff *sb;
306
307 i = card->index;
308
309 if (cards[i] == NULL)
310 return;
311
312 if (card->atmdev->phy && card->atmdev->phy->stop)
313 card->atmdev->phy->stop(card->atmdev);
314
315 /* Stop everything */
316 writel(0x00000000, card->membase + CFG);
317
318 /* De-register device */
319 atm_dev_deregister(card->atmdev);
320
321 /* Disable PCI device */
322 pci_disable_device(pcidev);
323
324 /* Free up resources */
325 j = 0;
326 PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
327 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
328 {
329 dev_kfree_skb_any(hb);
330 j++;
331 }
332 PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
333 j = 0;
334 PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, card->iovpool.count);
335 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
336 {
337 dev_kfree_skb_any(iovb);
338 j++;
339 }
340 PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
341 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
342 dev_kfree_skb_any(lb);
343 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
344 dev_kfree_skb_any(sb);
345 free_scq(card->scq0, NULL);
346 for (j = 0; j < NS_FRSCD_NUM; j++)
347 {
348 if (card->scd2vc[j] != NULL)
349 free_scq(card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
350 }
351 kfree(card->rsq.org);
352 kfree(card->tsq.org);
353 free_irq(card->pcidev->irq, card);
354 iounmap((void *) card->membase);
355 kfree(card);
356 }
357
358
359
360 static struct pci_device_id nicstar_pci_tbl[] __devinitdata =
361 {
362 {PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_IDT_IDT77201,
363 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
364 {0,} /* terminate list */
365 };
366 MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl);
367
368
369
370 static struct pci_driver nicstar_driver = {
371 .name = "nicstar",
372 .id_table = nicstar_pci_tbl,
373 .probe = nicstar_init_one,
374 .remove = __devexit_p(nicstar_remove_one),
375 };
376
377
378
379 static int __init nicstar_init(void)
380 {
381 unsigned error = 0; /* Initialized to remove compile warning */
382
383 XPRINTK("nicstar: nicstar_init() called.\n");
384
385 error = pci_module_init(&nicstar_driver);
386
387 TXPRINTK("nicstar: TX debug enabled.\n");
388 RXPRINTK("nicstar: RX debug enabled.\n");
389 PRINTK("nicstar: General debug enabled.\n");
390 #ifdef PHY_LOOPBACK
391 printk("nicstar: using PHY loopback.\n");
392 #endif /* PHY_LOOPBACK */
393 XPRINTK("nicstar: nicstar_init() returned.\n");
394
395 if (!error) {
396 init_timer(&ns_timer);
397 ns_timer.expires = jiffies + NS_POLL_PERIOD;
398 ns_timer.data = 0UL;
399 ns_timer.function = ns_poll;
400 add_timer(&ns_timer);
401 }
402
403 return error;
404 }
405
406
407
408 static void __exit nicstar_cleanup(void)
409 {
410 XPRINTK("nicstar: nicstar_cleanup() called.\n");
411
412 if (MOD_IN_USE)
413 printk("nicstar: module in use, remove delayed.\n");
414
415 del_timer(&ns_timer);
416
417 pci_unregister_driver(&nicstar_driver);
418
419 XPRINTK("nicstar: nicstar_cleanup() returned.\n");
420 }
421
422
423
424 static u32 ns_read_sram(ns_dev *card, u32 sram_address)
425 {
426 unsigned long flags;
427 u32 data;
428 sram_address <<= 2;
429 sram_address &= 0x0007FFFC; /* address must be dword aligned */
430 sram_address |= 0x50000000; /* SRAM read command */
431 ns_grab_res_lock(card, flags);
432 while (CMD_BUSY(card));
433 writel(sram_address, card->membase + CMD);
434 while (CMD_BUSY(card));
435 data = readl(card->membase + DR0);
436 spin_unlock_irqrestore(&card->res_lock, flags);
437 return data;
438 }
439
440
441
442 static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count)
443 {
444 unsigned long flags;
445 int i, c;
446 count--; /* count range now is 0..3 instead of 1..4 */
447 c = count;
448 c <<= 2; /* to use increments of 4 */
449 ns_grab_res_lock(card, flags);
450 while (CMD_BUSY(card));
451 for (i = 0; i <= c; i += 4)
452 writel(*(value++), card->membase + i);
453 /* Note: DR# registers are the first 4 dwords in nicstar's memspace,
454 so card->membase + DR0 == card->membase */
455 sram_address <<= 2;
456 sram_address &= 0x0007FFFC;
457 sram_address |= (0x40000000 | count);
458 writel(sram_address, card->membase + CMD);
459 spin_unlock_irqrestore(&card->res_lock, flags);
460 }
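/* Minimal usage sketch (illustrative only), mirroring the SRAM size probe in
   ns_init_card() below:

      u32 val = 0x76543210;
      ns_write_sram(card, 0x1C003, &val, 1);

   writes a single word, issuing the CMD word 0x40000000 | (0x1C003 << 2)
   with the low count bits at 0 (one word); ns_read_sram() builds the
   analogous 0x50000000 read command. */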
461
462
463 static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
464 {
465 int j;
466 struct ns_dev *card = NULL;
467 unsigned char pci_latency;
468 unsigned error;
469 u32 data;
470 u32 u32d[4];
471 u32 ns_cfg_rctsize;
472 int bcount;
473
474 error = 0;
475
476 if (pci_enable_device(pcidev))
477 {
478 printk("nicstar%d: can't enable PCI device\n", i);
479 error = 2;
480 ns_init_card_error(card, error);
481 return error;
482 }
483
484 if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL)
485 {
486 printk("nicstar%d: can't allocate memory for device structure.\n", i);
487 error = 2;
488 ns_init_card_error(card, error);
489 return error;
490 }
491 cards[i] = card;
492 spin_lock_init(&card->int_lock);
493 spin_lock_init(&card->res_lock);
494
495 pci_set_drvdata(pcidev, card);
496
497 card->index = i;
498 card->atmdev = NULL;
499 card->pcidev = pcidev;
500 card->membase = pci_resource_start(pcidev, 1);
501 card->membase = (unsigned long) ioremap(card->membase, NS_IOREMAP_SIZE);
502 if (card->membase == 0)
503 {
504 printk("nicstar%d: can't ioremap() membase.\n",i);
505 error = 3;
506 ns_init_card_error(card, error);
507 return error;
508 }
509 PRINTK("nicstar%d: membase at 0x%x.\n", i, card->membase);
510
511 pci_set_master(pcidev);
512
513 if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0)
514 {
515 printk("nicstar%d: can't read PCI latency timer.\n", i);
516 error = 6;
517 ns_init_card_error(card, error);
518 return error;
519 }
520 #ifdef NS_PCI_LATENCY
521 if (pci_latency < NS_PCI_LATENCY)
522 {
523 PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, NS_PCI_LATENCY);
524 for (j = 1; j < 4; j++)
525 {
526 if (pci_write_config_byte(pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0)
527 break;
528 }
529 if (j == 4)
530 {
531 printk("nicstar%d: can't set PCI latency timer to %d.\n", i, NS_PCI_LATENCY);
532 error = 7;
533 ns_init_card_error(card, error);
534 return error;
535 }
536 }
537 #endif /* NS_PCI_LATENCY */
538
539 /* Clear timer overflow */
540 data = readl(card->membase + STAT);
541 if (data & NS_STAT_TMROF)
542 writel(NS_STAT_TMROF, card->membase + STAT);
543
544 /* Software reset */
545 writel(NS_CFG_SWRST, card->membase + CFG);
546 NS_DELAY;
547 writel(0x00000000, card->membase + CFG);
548
549 /* PHY reset */
550 writel(0x00000008, card->membase + GP);
551 NS_DELAY;
552 writel(0x00000001, card->membase + GP);
553 NS_DELAY;
554 while (CMD_BUSY(card));
555 writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */
556 NS_DELAY;
557
558 /* Detect PHY type */
559 while (CMD_BUSY(card));
560 writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
561 while (CMD_BUSY(card));
562 data = readl(card->membase + DR0);
563 switch(data) {
564 case 0x00000009:
565 printk("nicstar%d: PHY seems to be 25 Mbps.\n", i);
566 card->max_pcr = ATM_25_PCR;
567 while(CMD_BUSY(card));
568 writel(0x00000008, card->membase + DR0);
569 writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD);
570 /* Clear any pending interrupt */
571 writel(NS_STAT_SFBQF, card->membase + STAT);
572 #ifdef PHY_LOOPBACK
573 while(CMD_BUSY(card));
574 writel(0x00000022, card->membase + DR0);
575 writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD);
576 #endif /* PHY_LOOPBACK */
577 break;
578 case 0x00000030:
579 case 0x00000031:
580 printk("nicstar%d: PHY seems to be 155 Mbps.\n", i);
581 card->max_pcr = ATM_OC3_PCR;
582 #ifdef PHY_LOOPBACK
583 while(CMD_BUSY(card));
584 writel(0x00000002, card->membase + DR0);
585 writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD);
586 #endif /* PHY_LOOPBACK */
587 break;
588 default:
589 printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data);
590 error = 8;
591 ns_init_card_error(card, error);
592 return error;
593 }
594 writel(0x00000000, card->membase + GP);
595
596 /* Determine SRAM size */
597 data = 0x76543210;
598 ns_write_sram(card, 0x1C003, &data, 1);
599 data = 0x89ABCDEF;
600 ns_write_sram(card, 0x14003, &data, 1);
601 if (ns_read_sram(card, 0x14003) == 0x89ABCDEF &&
602 ns_read_sram(card, 0x1C003) == 0x76543210)
603 card->sram_size = 128;
604 else
605 card->sram_size = 32;
606 PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size);
607
608 card->rct_size = NS_MAX_RCTSIZE;
609
610 #if (NS_MAX_RCTSIZE == 4096)
611 if (card->sram_size == 128)
612 printk("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", i);
613 #elif (NS_MAX_RCTSIZE == 16384)
614 if (card->sram_size == 32)
615 {
616 printk("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", i);
617 card->rct_size = 4096;
618 }
619 #else
620 #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c
621 #endif
622
623 card->vpibits = NS_VPIBITS;
624 if (card->rct_size == 4096)
625 card->vcibits = 12 - NS_VPIBITS;
626 else /* card->rct_size == 16384 */
627 card->vcibits = 14 - NS_VPIBITS;
628
629 /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */
630 if (mac[i] == NULL)
631 nicstar_init_eprom(card->membase);
632
633 if (request_irq(pcidev->irq, &ns_irq_handler, SA_INTERRUPT | SA_SHIRQ, "nicstar", card) != 0)
634 {
635 printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
636 error = 9;
637 ns_init_card_error(card, error);
638 return error;
639 }
640
641 /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
642 writel(0x00000000, card->membase + VPM);
643
644 /* Initialize TSQ */
645 card->tsq.org = kmalloc(NS_TSQSIZE + NS_TSQ_ALIGNMENT, GFP_KERNEL);
646 if (card->tsq.org == NULL)
647 {
648 printk("nicstar%d: can't allocate TSQ.\n", i);
649 error = 10;
650 ns_init_card_error(card, error);
651 return error;
652 }
653 card->tsq.base = (ns_tsi *) ALIGN_ADDRESS(card->tsq.org, NS_TSQ_ALIGNMENT);
654 card->tsq.next = card->tsq.base;
655 card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
656 for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
657 ns_tsi_init(card->tsq.base + j);
658 writel(0x00000000, card->membase + TSQH);
659 writel((u32) virt_to_bus(card->tsq.base), card->membase + TSQB);
660 PRINTK("nicstar%d: TSQ base at 0x%x 0x%x 0x%x.\n", i, (u32) card->tsq.base,
661 (u32) virt_to_bus(card->tsq.base), readl(card->membase + TSQB));
662
663 /* Initialize RSQ */
664 card->rsq.org = kmalloc(NS_RSQSIZE + NS_RSQ_ALIGNMENT, GFP_KERNEL);
665 if (card->rsq.org == NULL)
666 {
667 printk("nicstar%d: can't allocate RSQ.\n", i);
668 error = 11;
669 ns_init_card_error(card, error);
670 return error;
671 }
672 card->rsq.base = (ns_rsqe *) ALIGN_ADDRESS(card->rsq.org, NS_RSQ_ALIGNMENT);
673 card->rsq.next = card->rsq.base;
674 card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
675 for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
676 ns_rsqe_init(card->rsq.base + j);
677 writel(0x00000000, card->membase + RSQH);
678 writel((u32) virt_to_bus(card->rsq.base), card->membase + RSQB);
679 PRINTK("nicstar%d: RSQ base at 0x%x.\n", i, (u32) card->rsq.base);
680
681 /* Initialize SCQ0, the only VBR SCQ used */
682 card->scq1 = (scq_info *) NULL;
683 card->scq2 = (scq_info *) NULL;
684 card->scq0 = get_scq(VBR_SCQSIZE, NS_VRSCD0);
685 if (card->scq0 == (scq_info *) NULL)
686 {
687 printk("nicstar%d: can't get SCQ0.\n", i);
688 error = 12;
689 ns_init_card_error(card, error);
690 return error;
691 }
692 u32d[0] = (u32) virt_to_bus(card->scq0->base);
693 u32d[1] = (u32) 0x00000000;
694 u32d[2] = (u32) 0xffffffff;
695 u32d[3] = (u32) 0x00000000;
696 ns_write_sram(card, NS_VRSCD0, u32d, 4);
697 ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */
698 ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... */
699 card->scq0->scd = NS_VRSCD0;
700 PRINTK("nicstar%d: VBR-SCQ0 base at 0x%x.\n", i, (u32) card->scq0->base);
701
702 /* Initialize TSTs */
703 card->tst_addr = NS_TST0;
704 card->tst_free_entries = NS_TST_NUM_ENTRIES;
705 data = NS_TST_OPCODE_VARIABLE;
706 for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
707 ns_write_sram(card, NS_TST0 + j, &data, 1);
708 data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0);
709 ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1);
710 for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
711 ns_write_sram(card, NS_TST1 + j, &data, 1);
712 data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1);
713 ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1);
714 for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
715 card->tste2vc[j] = NULL;
716 writel(NS_TST0 << 2, card->membase + TSTB);
717
718
719 /* Initialize RCT. AAL type is set on opening the VC. */
720 #ifdef RCQ_SUPPORT
721 u32d[0] = NS_RCTE_RAWCELLINTEN;
722 #else
723 u32d[0] = 0x00000000;
724 #endif /* RCQ_SUPPORT */
725 u32d[1] = 0x00000000;
726 u32d[2] = 0x00000000;
727 u32d[3] = 0xFFFFFFFF;
728 for (j = 0; j < card->rct_size; j++)
729 ns_write_sram(card, j * 4, u32d, 4);
730
731 memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map));
732
733 for (j = 0; j < NS_FRSCD_NUM; j++)
734 card->scd2vc[j] = NULL;
735
736 /* Initialize buffer levels */
737 card->sbnr.min = MIN_SB;
738 card->sbnr.init = NUM_SB;
739 card->sbnr.max = MAX_SB;
740 card->lbnr.min = MIN_LB;
741 card->lbnr.init = NUM_LB;
742 card->lbnr.max = MAX_LB;
743 card->iovnr.min = MIN_IOVB;
744 card->iovnr.init = NUM_IOVB;
745 card->iovnr.max = MAX_IOVB;
746 card->hbnr.min = MIN_HB;
747 card->hbnr.init = NUM_HB;
748 card->hbnr.max = MAX_HB;
749
750 card->sm_handle = 0x00000000;
751 card->sm_addr = 0x00000000;
752 card->lg_handle = 0x00000000;
753 card->lg_addr = 0x00000000;
754
755 card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */
756
757 /* Pre-allocate some huge buffers */
758 skb_queue_head_init(&card->hbpool.queue);
759 card->hbpool.count = 0;
760 for (j = 0; j < NUM_HB; j++)
761 {
762 struct sk_buff *hb;
763 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
764 if (hb == NULL)
765 {
766 printk("nicstar%d: can't allocate %dth of %d huge buffers.\n",
767 i, j, NUM_HB);
768 error = 13;
769 ns_init_card_error(card, error);
770 return error;
771 }
772 skb_queue_tail(&card->hbpool.queue, hb);
773 card->hbpool.count++;
774 }
775
776
777 /* Allocate large buffers */
778 skb_queue_head_init(&card->lbpool.queue);
779 card->lbpool.count = 0; /* Not used */
780 for (j = 0; j < NUM_LB; j++)
781 {
782 struct sk_buff *lb;
783 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
784 if (lb == NULL)
785 {
786 printk("nicstar%d: can't allocate %dth of %d large buffers.\n",
787 i, j, NUM_LB);
788 error = 14;
789 ns_init_card_error(card, error);
790 return error;
791 }
792 skb_queue_tail(&card->lbpool.queue, lb);
793 skb_reserve(lb, NS_SMBUFSIZE);
794 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
795 /* Due to the implementation of push_rxbufs() (buffers are handed to the card in pairs) this is 1, not 0 */
796 if (j == 1)
797 {
798 card->rcbuf = lb;
799 card->rawch = (u32) virt_to_bus(lb->data);
800 }
801 }
802 /* Test for strange behaviour which leads to crashes */
803 if ((bcount = ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min)
804 {
805 printk("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n",
806 i, j, bcount);
807 error = 14;
808 ns_init_card_error(card, error);
809 return error;
810 }
811
812
813 /* Allocate small buffers */
814 skb_queue_head_init(&card->sbpool.queue);
815 card->sbpool.count = 0; /* Not used */
816 for (j = 0; j < NUM_SB; j++)
817 {
818 struct sk_buff *sb;
819 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
820 if (sb == NULL)
821 {
822 printk("nicstar%d: can't allocate %dth of %d small buffers.\n",
823 i, j, NUM_SB);
824 error = 15;
825 ns_init_card_error(card, error);
826 return error;
827 }
828 skb_queue_tail(&card->sbpool.queue, sb);
829 skb_reserve(sb, NS_AAL0_HEADER);
830 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
831 }
832 /* Test for strange behaviour which leads to crashes */
833 if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min)
834 {
835 printk("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n",
836 i, j, bcount);
837 error = 15;
838 ns_init_card_error(card, error);
839 return error;
840 }
841
842
843 /* Allocate iovec buffers */
844 skb_queue_head_init(&card->iovpool.queue);
845 card->iovpool.count = 0;
846 for (j = 0; j < NUM_IOVB; j++)
847 {
848 struct sk_buff *iovb;
849 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
850 if (iovb == NULL)
851 {
852 printk("nicstar%d: can't allocate %dth of %d iovec buffers.\n",
853 i, j, NUM_IOVB);
854 error = 16;
855 ns_init_card_error(card, error);
856 return error;
857 }
858 skb_queue_tail(&card->iovpool.queue, iovb);
859 card->iovpool.count++;
860 }
861
862 card->intcnt = 0;
863
864 /* Configure NICStAR */
865 if (card->rct_size == 4096)
866 ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
867 else /* (card->rct_size == 16384) */
868 ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES;
869
870 card->efbie = 1;
871
872 /* Register device */
873 card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL);
874 if (card->atmdev == NULL)
875 {
876 printk("nicstar%d: can't register device.\n", i);
877 error = 17;
878 ns_init_card_error(card, error);
879 return error;
880 }
881
882 if (ns_parse_mac(mac[i], card->atmdev->esi)) {
883 nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
884 card->atmdev->esi, 6);
885 if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
886 nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
887 card->atmdev->esi, 6);
888 }
889 }
890
891 printk("nicstar%d: MAC address %02X:%02X:%02X:%02X:%02X:%02X\n", i,
892 card->atmdev->esi[0], card->atmdev->esi[1], card->atmdev->esi[2],
893 card->atmdev->esi[3], card->atmdev->esi[4], card->atmdev->esi[5]);
894
895 card->atmdev->dev_data = card;
896 card->atmdev->ci_range.vpi_bits = card->vpibits;
897 card->atmdev->ci_range.vci_bits = card->vcibits;
898 card->atmdev->link_rate = card->max_pcr;
899 card->atmdev->phy = NULL;
900
901 #ifdef CONFIG_ATM_NICSTAR_USE_SUNI
902 if (card->max_pcr == ATM_OC3_PCR)
903 suni_init(card->atmdev);
904 #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
905
906 #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
907 if (card->max_pcr == ATM_25_PCR)
908 idt77105_init(card->atmdev);
909 #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
910
911 if (card->atmdev->phy && card->atmdev->phy->start)
912 card->atmdev->phy->start(card->atmdev);
913
914 writel(NS_CFG_RXPATH |
915 NS_CFG_SMBUFSIZE |
916 NS_CFG_LGBUFSIZE |
917 NS_CFG_EFBIE |
918 NS_CFG_RSQSIZE |
919 NS_CFG_VPIBITS |
920 ns_cfg_rctsize |
921 NS_CFG_RXINT_NODELAY |
922 NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */
923 NS_CFG_RSQAFIE |
924 NS_CFG_TXEN |
925 NS_CFG_TXIE |
926 NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */
927 NS_CFG_PHYIE,
928 card->membase + CFG);
929
930 num_cards++;
931
932 return error;
933 }
934
935
936
937 static void __devinit ns_init_card_error(ns_dev *card, int error)
938 {
939 if (error >= 17)
940 {
941 writel(0x00000000, card->membase + CFG);
942 }
943 if (error >= 16)
944 {
945 struct sk_buff *iovb;
946 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
947 dev_kfree_skb_any(iovb);
948 }
949 if (error >= 15)
950 {
951 struct sk_buff *sb;
952 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
953 dev_kfree_skb_any(sb);
954 free_scq(card->scq0, NULL);
955 }
956 if (error >= 14)
957 {
958 struct sk_buff *lb;
959 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
960 dev_kfree_skb_any(lb);
961 }
962 if (error >= 13)
963 {
964 struct sk_buff *hb;
965 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
966 dev_kfree_skb_any(hb);
967 }
968 if (error >= 12)
969 {
970 kfree(card->rsq.org);
971 }
972 if (error >= 11)
973 {
974 kfree(card->tsq.org);
975 }
976 if (error >= 10)
977 {
978 free_irq(card->pcidev->irq, card);
979 }
980 if (error >= 4)
981 {
982 iounmap((void *) card->membase);
983 }
984 if (error >= 3)
985 {
986 pci_disable_device(card->pcidev);
987 kfree(card);
988 }
989 }
990
991
992
993 static scq_info *get_scq(int size, u32 scd)
994 {
995 scq_info *scq;
996 int i;
997
998 if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
999 return (scq_info *) NULL;
1000
1001 scq = (scq_info *) kmalloc(sizeof(scq_info), GFP_KERNEL);
1002 if (scq == (scq_info *) NULL)
1003 return (scq_info *) NULL;
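/* Note (added for clarity): the SCQ base is aligned to the SCQ size itself,
   hence the 2 * size allocation below and the ALIGN_ADDRESS() call that
   picks the first suitably aligned boundary inside that region. */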
1004 scq->org = kmalloc(2 * size, GFP_KERNEL);
1005 if (scq->org == NULL)
1006 {
1007 kfree(scq);
1008 return (scq_info *) NULL;
1009 }
1010 scq->skb = (struct sk_buff **) kmalloc(sizeof(struct sk_buff *) *
1011 (size / NS_SCQE_SIZE), GFP_KERNEL);
1012 if (scq->skb == (struct sk_buff **) NULL)
1013 {
1014 kfree(scq->org);
1015 kfree(scq);
1016 return (scq_info *) NULL;
1017 }
1018 scq->num_entries = size / NS_SCQE_SIZE;
1019 scq->base = (ns_scqe *) ALIGN_ADDRESS(scq->org, size);
1020 scq->next = scq->base;
1021 scq->last = scq->base + (scq->num_entries - 1);
1022 scq->tail = scq->last;
1023 scq->scd = scd;
1025 scq->tbd_count = 0;
1026 init_waitqueue_head(&scq->scqfull_waitq);
1027 scq->full = 0;
1028 spin_lock_init(&scq->lock);
1029
1030 for (i = 0; i < scq->num_entries; i++)
1031 scq->skb[i] = NULL;
1032
1033 return scq;
1034 }
1035
1036
1037
1038 /* For variable rate SCQ vcc must be NULL */
1039 static void free_scq(scq_info *scq, struct atm_vcc *vcc)
1040 {
1041 int i;
1042
1043 if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
1044 for (i = 0; i < scq->num_entries; i++)
1045 {
1046 if (scq->skb[i] != NULL)
1047 {
1048 vcc = ATM_SKB(scq->skb[i])->vcc;
1049 if (vcc->pop != NULL)
1050 vcc->pop(vcc, scq->skb[i]);
1051 else
1052 dev_kfree_skb_any(scq->skb[i]);
1053 }
1054 }
1055 else /* vcc must be != NULL */
1056 {
1057 if (vcc == NULL)
1058 {
1059 printk("nicstar: free_scq() called with vcc == NULL for fixed rate scq.");
1060 for (i = 0; i < scq->num_entries; i++)
1061 dev_kfree_skb_any(scq->skb[i]);
1062 }
1063 else
1064 for (i = 0; i < scq->num_entries; i++)
1065 {
1066 if (scq->skb[i] != NULL)
1067 {
1068 if (vcc->pop != NULL)
1069 vcc->pop(vcc, scq->skb[i]);
1070 else
1071 dev_kfree_skb_any(scq->skb[i]);
1072 }
1073 }
1074 }
1075 kfree(scq->skb);
1076 kfree(scq->org);
1077 kfree(scq);
1078 }
1079
1080
1081
1082 /* The handles passed must be pointers to the sk_buff containing the small
1083 or large buffer(s) cast to u32. */
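/* Note (added for clarity): the card takes free buffers two at a time, so a
   call with addr2 == 0 either pairs addr1 with a buffer stashed earlier in
   card->sm_addr/sm_handle (or lg_addr/lg_handle), or just stashes addr1
   there; the NS_CMD_WRITE_FREEBUFQ command is only issued once a pair is
   available. */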
1084 static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
1085 u32 handle2, u32 addr2)
1086 {
1087 u32 stat;
1088 unsigned long flags;
1089
1090
1091 #ifdef GENERAL_DEBUG
1092 if (!addr1)
1093 printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", card->index);
1094 #endif /* GENERAL_DEBUG */
1095
1096 stat = readl(card->membase + STAT);
1097 card->sbfqc = ns_stat_sfbqc_get(stat);
1098 card->lbfqc = ns_stat_lfbqc_get(stat);
1099 if (type == BUF_SM)
1100 {
1101 if (!addr2)
1102 {
1103 if (card->sm_addr)
1104 {
1105 addr2 = card->sm_addr;
1106 handle2 = card->sm_handle;
1107 card->sm_addr = 0x00000000;
1108 card->sm_handle = 0x00000000;
1109 }
1110 else /* (!sm_addr) */
1111 {
1112 card->sm_addr = addr1;
1113 card->sm_handle = handle1;
1114 }
1115 }
1116 }
1117 else /* type == BUF_LG */
1118 {
1119 if (!addr2)
1120 {
1121 if (card->lg_addr)
1122 {
1123 addr2 = card->lg_addr;
1124 handle2 = card->lg_handle;
1125 card->lg_addr = 0x00000000;
1126 card->lg_handle = 0x00000000;
1127 }
1128 else /* (!lg_addr) */
1129 {
1130 card->lg_addr = addr1;
1131 card->lg_handle = handle1;
1132 }
1133 }
1134 }
1135
1136 if (addr2)
1137 {
1138 if (type == BUF_SM)
1139 {
1140 if (card->sbfqc >= card->sbnr.max)
1141 {
1142 skb_unlink((struct sk_buff *) handle1);
1143 dev_kfree_skb_any((struct sk_buff *) handle1);
1144 skb_unlink((struct sk_buff *) handle2);
1145 dev_kfree_skb_any((struct sk_buff *) handle2);
1146 return;
1147 }
1148 else
1149 card->sbfqc += 2;
1150 }
1151 else /* (type == BUF_LG) */
1152 {
1153 if (card->lbfqc >= card->lbnr.max)
1154 {
1155 skb_unlink((struct sk_buff *) handle1);
1156 dev_kfree_skb_any((struct sk_buff *) handle1);
1157 skb_unlink((struct sk_buff *) handle2);
1158 dev_kfree_skb_any((struct sk_buff *) handle2);
1159 return;
1160 }
1161 else
1162 card->lbfqc += 2;
1163 }
1164
1165 ns_grab_res_lock(card, flags);
1166
1167 while (CMD_BUSY(card));
1168 writel(addr2, card->membase + DR3);
1169 writel(handle2, card->membase + DR2);
1170 writel(addr1, card->membase + DR1);
1171 writel(handle1, card->membase + DR0);
1172 writel(NS_CMD_WRITE_FREEBUFQ | (u32) type, card->membase + CMD);
1173
1174 spin_unlock_irqrestore(&card->res_lock, flags);
1175
1176 XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index,
1177 (type == BUF_SM ? "small" : "large"), addr1, addr2);
1178 }
1179
1180 if (!card->efbie && card->sbfqc >= card->sbnr.min &&
1181 card->lbfqc >= card->lbnr.min)
1182 {
1183 card->efbie = 1;
1184 writel((readl(card->membase + CFG) | NS_CFG_EFBIE), card->membase + CFG);
1185 }
1186
1187 return;
1188 }
1189
1190
1191
1192 static void ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
1193 {
1194 u32 stat_r;
1195 ns_dev *card;
1196 struct atm_dev *dev;
1197 unsigned long flags;
1198
1199 card = (ns_dev *) dev_id;
1200 dev = card->atmdev;
1201 card->intcnt++;
1202
1203 PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index);
1204
1205 ns_grab_int_lock(card, flags);
1206
1207 stat_r = readl(card->membase + STAT);
1208
1209 /* Transmit Status Indicator has been written to T. S. Queue */
1210 if (stat_r & NS_STAT_TSIF)
1211 {
1212 TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
1213 process_tsq(card);
1214 writel(NS_STAT_TSIF, card->membase + STAT);
1215 }
1216
1217 /* Incomplete CS-PDU has been transmitted */
1218 if (stat_r & NS_STAT_TXICP)
1219 {
1220 writel(NS_STAT_TXICP, card->membase + STAT);
1221 TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
1222 card->index);
1223 }
1224
1225 /* Transmit Status Queue 7/8 full */
1226 if (stat_r & NS_STAT_TSQF)
1227 {
1228 writel(NS_STAT_TSQF, card->membase + STAT);
1229 PRINTK("nicstar%d: TSQ full.\n", card->index);
1230 process_tsq(card);
1231 }
1232
1233 /* Timer overflow */
1234 if (stat_r & NS_STAT_TMROF)
1235 {
1236 writel(NS_STAT_TMROF, card->membase + STAT);
1237 PRINTK("nicstar%d: Timer overflow.\n", card->index);
1238 }
1239
1240 /* PHY device interrupt signal active */
1241 if (stat_r & NS_STAT_PHYI)
1242 {
1243 writel(NS_STAT_PHYI, card->membase + STAT);
1244 PRINTK("nicstar%d: PHY interrupt.\n", card->index);
1245 if (dev->phy && dev->phy->interrupt) {
1246 dev->phy->interrupt(dev);
1247 }
1248 }
1249
1250 /* Small Buffer Queue is full */
1251 if (stat_r & NS_STAT_SFBQF)
1252 {
1253 writel(NS_STAT_SFBQF, card->membase + STAT);
1254 printk("nicstar%d: Small free buffer queue is full.\n", card->index);
1255 }
1256
1257 /* Large Buffer Queue is full */
1258 if (stat_r & NS_STAT_LFBQF)
1259 {
1260 writel(NS_STAT_LFBQF, card->membase + STAT);
1261 printk("nicstar%d: Large free buffer queue is full.\n", card->index);
1262 }
1263
1264 /* Receive Status Queue is full */
1265 if (stat_r & NS_STAT_RSQF)
1266 {
1267 writel(NS_STAT_RSQF, card->membase + STAT);
1268 printk("nicstar%d: RSQ full.\n", card->index);
1269 process_rsq(card);
1270 }
1271
1272 /* Complete CS-PDU received */
1273 if (stat_r & NS_STAT_EOPDU)
1274 {
1275 RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
1276 process_rsq(card);
1277 writel(NS_STAT_EOPDU, card->membase + STAT);
1278 }
1279
1280 /* Raw cell received */
1281 if (stat_r & NS_STAT_RAWCF)
1282 {
1283 writel(NS_STAT_RAWCF, card->membase + STAT);
1284 #ifndef RCQ_SUPPORT
1285 printk("nicstar%d: Raw cell received and no support yet...\n",
1286 card->index);
1287 #endif /* RCQ_SUPPORT */
1288 /* NOTE: the following procedure may keep a raw cell pending until the
1289 next interrupt. As this preliminary support is only meant to
1290 avoid buffer leakage, this is not an issue. */
1291 while (readl(card->membase + RAWCT) != card->rawch)
1292 {
1293 ns_rcqe *rawcell;
1294
1295 rawcell = (ns_rcqe *) bus_to_virt(card->rawch);
1296 if (ns_rcqe_islast(rawcell))
1297 {
1298 struct sk_buff *oldbuf;
1299
1300 oldbuf = card->rcbuf;
1301 card->rcbuf = (struct sk_buff *) ns_rcqe_nextbufhandle(rawcell);
1302 card->rawch = (u32) virt_to_bus(card->rcbuf->data);
1303 recycle_rx_buf(card, oldbuf);
1304 }
1305 else
1306 card->rawch += NS_RCQE_SIZE;
1307 }
1308 }
1309
1310 /* Small buffer queue is empty */
1311 if (stat_r & NS_STAT_SFBQE)
1312 {
1313 int i;
1314 struct sk_buff *sb;
1315
1316 writel(NS_STAT_SFBQE, card->membase + STAT);
1317 printk("nicstar%d: Small free buffer queue empty.\n",
1318 card->index);
1319 for (i = 0; i < card->sbnr.min; i++)
1320 {
1321 sb = dev_alloc_skb(NS_SMSKBSIZE);
1322 if (sb == NULL)
1323 {
1324 writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG);
1325 card->efbie = 0;
1326 break;
1327 }
1328 skb_queue_tail(&card->sbpool.queue, sb);
1329 skb_reserve(sb, NS_AAL0_HEADER);
1330 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
1331 }
1332 card->sbfqc = i;
1333 process_rsq(card);
1334 }
1335
1336 /* Large buffer queue empty */
1337 if (stat_r & NS_STAT_LFBQE)
1338 {
1339 int i;
1340 struct sk_buff *lb;
1341
1342 writel(NS_STAT_LFBQE, card->membase + STAT);
1343 printk("nicstar%d: Large free buffer queue empty.\n",
1344 card->index);
1345 for (i = 0; i < card->lbnr.min; i++)
1346 {
1347 lb = dev_alloc_skb(NS_LGSKBSIZE);
1348 if (lb == NULL)
1349 {
1350 writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG);
1351 card->efbie = 0;
1352 break;
1353 }
1354 skb_queue_tail(&card->lbpool.queue, lb);
1355 skb_reserve(lb, NS_SMBUFSIZE);
1356 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
1357 }
1358 card->lbfqc = i;
1359 process_rsq(card);
1360 }
1361
1362 /* Receive Status Queue is 7/8 full */
1363 if (stat_r & NS_STAT_RSQAF)
1364 {
1365 writel(NS_STAT_RSQAF, card->membase + STAT);
1366 RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
1367 process_rsq(card);
1368 }
1369
1370 spin_unlock_irqrestore(&card->int_lock, flags);
1371 PRINTK("nicstar%d: end of interrupt service\n", card->index);
1372 }
1373
1374
1375
1376 static int ns_open(struct atm_vcc *vcc, short vpi, int vci)
1377 {
1378 ns_dev *card;
1379 vc_map *vc;
1380 int error;
1381 unsigned long tmpl, modl;
1382 int tcr, tcra; /* target cell rate, and absolute value */
1383 int n = 0; /* Number of entries in the TST. Initialized to remove
1384 the compiler warning. */
1385 u32 u32d[4];
1386 int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler
1387 warning. How I wish compilers were clever enough to
1388 tell which variables can truly be used
1389 uninitialized... */
1390 int inuse; /* tx or rx vc already in use by another vcc */
1391
1392 card = (ns_dev *) vcc->dev->dev_data;
1393 PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int) vpi, vci);
1394 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
1395 {
1396 PRINTK("nicstar%d: unsupported AAL.\n", card->index);
1397 return -EINVAL;
1398 }
1399
1400 if ((error = atm_find_ci(vcc, &vpi, &vci)))
1401 {
1402 PRINTK("nicstar%d: error in atm_find_ci().\n", card->index);
1403 return error;
1404 }
1405 vc = &(card->vcmap[vpi << card->vcibits | vci]);
1406 vcc->vpi = vpi;
1407 vcc->vci = vci;
1408 vcc->dev_data = vc;
1409
1410 inuse = 0;
1411 if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
1412 inuse = 1;
1413 if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
1414 inuse += 2;
1415 if (inuse)
1416 {
1417 printk("nicstar%d: %s vci already in use.\n", card->index,
1418 inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
1419 return -EINVAL;
1420 }
1421
1422 set_bit(ATM_VF_ADDR,&vcc->flags);
1423
1424 /* NOTE: You are not allowed to modify an open connection's QOS. To change
1425 that, remove the ATM_VF_PARTIAL flag checking. There may be other changes
1426 needed to do that. */
1427 if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
1428 {
1429 scq_info *scq;
1430
1431 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1432 if (vcc->qos.txtp.traffic_class == ATM_CBR)
1433 {
1434 /* Check requested cell rate and availability of SCD */
1435 if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 &&
1436 vcc->qos.txtp.min_pcr == 0)
1437 {
1438 PRINTK("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
1439 card->index);
1440 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1441 clear_bit(ATM_VF_ADDR,&vcc->flags);
1442 return -EINVAL;
1443 }
1444
1445 tcr = atm_pcr_goal(&(vcc->qos.txtp));
1446 tcra = tcr >= 0 ? tcr : -tcr;
1447
1448 PRINTK("nicstar%d: target cell rate = %d.\n", card->index,
1449 vcc->qos.txtp.max_pcr);
1450
1451 tmpl = (unsigned long)tcra * (unsigned long)NS_TST_NUM_ENTRIES;
1452 modl = tmpl % card->max_pcr;
1453
1454 n = (int)(tmpl / card->max_pcr);
1455 if (tcr > 0)
1456 {
1457 if (modl > 0) n++;
1458 }
1459 else if (tcr == 0)
1460 {
1461 if ((n = (card->tst_free_entries - NS_TST_RESERVED)) <= 0)
1462 {
1463 PRINTK("nicstar%d: no CBR bandwidth free.\n", card->index);
1464 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1465 clear_bit(ATM_VF_ADDR,&vcc->flags);
1466 return -EINVAL;
1467 }
1468 }
1469
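/* Worked example (illustrative only): the NS_TST_NUM_ENTRIES TST slots
   represent the full link rate, so asking for half of card->max_pcr gives
   n = tcra * NS_TST_NUM_ENTRIES / max_pcr, roughly half of the slots,
   rounded up above (for tcr > 0) whenever the division leaves a
   remainder. */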
1470 if (n == 0)
1471 {
1472 printk("nicstar%d: selected bandwidth < granularity.\n", card->index);
1473 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1474 clear_bit(ATM_VF_ADDR,&vcc->flags);
1475 return -EINVAL;
1476 }
1477
1478 if (n > (card->tst_free_entries - NS_TST_RESERVED))
1479 {
1480 PRINTK("nicstar%d: not enough free CBR bandwidth.\n", card->index);
1481 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1482 clear_bit(ATM_VF_ADDR,&vcc->flags);
1483 return -EINVAL;
1484 }
1485 else
1486 card->tst_free_entries -= n;
1487
1488 XPRINTK("nicstar%d: writing %d tst entries.\n", card->index, n);
1489 for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++)
1490 {
1491 if (card->scd2vc[frscdi] == NULL)
1492 {
1493 card->scd2vc[frscdi] = vc;
1494 break;
1495 }
1496 }
1497 if (frscdi == NS_FRSCD_NUM)
1498 {
1499 PRINTK("nicstar%d: no SCD available for CBR channel.\n", card->index);
1500 card->tst_free_entries += n;
1501 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1502 clear_bit(ATM_VF_ADDR,&vcc->flags);
1503 return -EBUSY;
1504 }
1505
1506 vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;
1507
1508 scq = get_scq(CBR_SCQSIZE, vc->cbr_scd);
1509 if (scq == (scq_info *) NULL)
1510 {
1511 PRINTK("nicstar%d: can't get fixed rate SCQ.\n", card->index);
1512 card->scd2vc[frscdi] = NULL;
1513 card->tst_free_entries += n;
1514 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1515 clear_bit(ATM_VF_ADDR,&vcc->flags);
1516 return -ENOMEM;
1517 }
1518 vc->scq = scq;
1519 u32d[0] = (u32) virt_to_bus(scq->base);
1520 u32d[1] = (u32) 0x00000000;
1521 u32d[2] = (u32) 0xffffffff;
1522 u32d[3] = (u32) 0x00000000;
1523 ns_write_sram(card, vc->cbr_scd, u32d, 4);
1524
1525 fill_tst(card, n, vc);
1526 }
1527 else if (vcc->qos.txtp.traffic_class == ATM_UBR)
1528 {
1529 vc->cbr_scd = 0x00000000;
1530 vc->scq = card->scq0;
1531 }
1532
1533 if (vcc->qos.txtp.traffic_class != ATM_NONE)
1534 {
1535 vc->tx = 1;
1536 vc->tx_vcc = vcc;
1537 vc->tbd_count = 0;
1538 }
1539 if (vcc->qos.rxtp.traffic_class != ATM_NONE)
1540 {
1541 u32 status;
1542
1543 vc->rx = 1;
1544 vc->rx_vcc = vcc;
1545 vc->rx_iov = NULL;
1546
1547 /* Open the connection in hardware */
1548 if (vcc->qos.aal == ATM_AAL5)
1549 status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
1550 else /* vcc->qos.aal == ATM_AAL0 */
1551 status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
1552 #ifdef RCQ_SUPPORT
1553 status |= NS_RCTE_RAWCELLINTEN;
1554 #endif /* RCQ_SUPPORT */
1555 ns_write_sram(card, NS_RCT + (vpi << card->vcibits | vci) *
1556 NS_RCT_ENTRY_SIZE, &status, 1);
1557 }
1558
1559 }
1560
1561 set_bit(ATM_VF_READY,&vcc->flags);
1562 return 0;
1563 }
1564
1565
1566
1567 static void ns_close(struct atm_vcc *vcc)
1568 {
1569 vc_map *vc;
1570 ns_dev *card;
1571 u32 data;
1572 int i;
1573
1574 vc = vcc->dev_data;
1575 card = vcc->dev->dev_data;
1576 PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
1577 (int) vcc->vpi, vcc->vci);
1578
1579 clear_bit(ATM_VF_READY,&vcc->flags);
1580
1581 if (vcc->qos.rxtp.traffic_class != ATM_NONE)
1582 {
1583 u32 addr;
1584 unsigned long flags;
1585
1586 addr = NS_RCT + (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
1587 ns_grab_res_lock(card, flags);
1588 while(CMD_BUSY(card));
1589 writel(NS_CMD_CLOSE_CONNECTION | addr << 2, card->membase + CMD);
1590 spin_unlock_irqrestore(&card->res_lock, flags);
1591
1592 vc->rx = 0;
1593 if (vc->rx_iov != NULL)
1594 {
1595 struct sk_buff *iovb;
1596 u32 stat;
1597
1598 stat = readl(card->membase + STAT);
1599 card->sbfqc = ns_stat_sfbqc_get(stat);
1600 card->lbfqc = ns_stat_lfbqc_get(stat);
1601
1602 PRINTK("nicstar%d: closing a VC with pending rx buffers.\n",
1603 card->index);
1604 iovb = vc->rx_iov;
1605 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
1606 NS_SKB(iovb)->iovcnt);
1607 NS_SKB(iovb)->iovcnt = 0;
1608 NS_SKB(iovb)->vcc = NULL;
1609 ns_grab_int_lock(card, flags);
1610 recycle_iov_buf(card, iovb);
1611 spin_unlock_irqrestore(&card->int_lock, flags);
1612 vc->rx_iov = NULL;
1613 }
1614 }
1615
1616 if (vcc->qos.txtp.traffic_class != ATM_NONE)
1617 {
1618 vc->tx = 0;
1619 }
1620
1621 if (vcc->qos.txtp.traffic_class == ATM_CBR)
1622 {
1623 unsigned long flags;
1624 ns_scqe *scqep;
1625 scq_info *scq;
1626
1627 scq = vc->scq;
1628
1629 for (;;)
1630 {
1631 ns_grab_scq_lock(card, scq, flags);
1632 scqep = scq->next;
1633 if (scqep == scq->base)
1634 scqep = scq->last;
1635 else
1636 scqep--;
1637 if (scqep == scq->tail)
1638 {
1639 spin_unlock_irqrestore(&scq->lock, flags);
1640 break;
1641 }
1642 /* If the last entry is not a TSR, place one in the SCQ in order to
1643 be able to completely drain it and then close. */
1644 if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next)
1645 {
1646 ns_scqe tsr;
1647 u32 scdi, scqi;
1648 u32 data;
1649 int index;
1650
1651 tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1652 scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1653 scqi = scq->next - scq->base;
1654 tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1655 tsr.word_3 = 0x00000000;
1656 tsr.word_4 = 0x00000000;
1657 *scq->next = tsr;
1658 index = (int) scqi;
1659 scq->skb[index] = NULL;
1660 if (scq->next == scq->last)
1661 scq->next = scq->base;
1662 else
1663 scq->next++;
1664 data = (u32) virt_to_bus(scq->next);
1665 ns_write_sram(card, scq->scd, &data, 1);
1666 }
1667 spin_unlock_irqrestore(&scq->lock, flags);
1668 schedule();
1669 }
1670
1671 /* Free all TST entries */
1672 data = NS_TST_OPCODE_VARIABLE;
1673 for (i = 0; i < NS_TST_NUM_ENTRIES; i++)
1674 {
1675 if (card->tste2vc[i] == vc)
1676 {
1677 ns_write_sram(card, card->tst_addr + i, &data, 1);
1678 card->tste2vc[i] = NULL;
1679 card->tst_free_entries++;
1680 }
1681 }
1682
1683 card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
1684 free_scq(vc->scq, vcc);
1685 }
1686
1687 vcc->dev_data = NULL;
1688 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1689 clear_bit(ATM_VF_ADDR,&vcc->flags);
1690
1691 #ifdef RX_DEBUG
1692 {
1693 u32 stat, cfg;
1694 stat = readl(card->membase + STAT);
1695 cfg = readl(card->membase + CFG);
1696 printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg);
1697 printk("TSQ: base = 0x%08X next = 0x%08X last = 0x%08X TSQT = 0x%08X \n",
1698 (u32) card->tsq.base, (u32) card->tsq.next,(u32) card->tsq.last,
1699 readl(card->membase + TSQT));
1700 printk("RSQ: base = 0x%08X next = 0x%08X last = 0x%08X RSQT = 0x%08X \n",
1701 (u32) card->rsq.base, (u32) card->rsq.next,(u32) card->rsq.last,
1702 readl(card->membase + RSQT));
1703 printk("Empty free buffer queue interrupt %s \n",
1704 card->efbie ? "enabled" : "disabled");
1705 printk("SBCNT = %d count = %d LBCNT = %d count = %d \n",
1706 ns_stat_sfbqc_get(stat), card->sbpool.count,
1707 ns_stat_lfbqc_get(stat), card->lbpool.count);
1708 printk("hbpool.count = %d iovpool.count = %d \n",
1709 card->hbpool.count, card->iovpool.count);
1710 }
1711 #endif /* RX_DEBUG */
1712 }
1713
1714
1715
1716 static void fill_tst(ns_dev *card, int n, vc_map *vc)
1717 {
1718 u32 new_tst;
1719 unsigned long cl;
1720 int e, r;
1721 u32 data;
1722
1723 /* It would be very complicated to keep the two TSTs synchronized while
1724 assuring that writes are only made to the inactive TST. So, for now I
1725 will use only one TST. If problems occur, I will change this again */
1726
1727 new_tst = card->tst_addr;
1728
1729 /* Fill procedure */
1730
1731 for (e = 0; e < NS_TST_NUM_ENTRIES; e++)
1732 {
1733 if (card->tste2vc[e] == NULL)
1734 break;
1735 }
1736 if (e == NS_TST_NUM_ENTRIES) {
1737 printk("nicstar%d: No free TST entries found. \n", card->index);
1738 return;
1739 }
1740
1741 r = n;
1742 cl = NS_TST_NUM_ENTRIES;
1743 data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd);
1744
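/* Note (added for clarity): the loop below spreads the n entries for this VC
   roughly evenly over the table, Bresenham style: "cl" grows by n for every
   slot visited and an entry is claimed (and cl reduced by the table size)
   each time it reaches NS_TST_NUM_ENTRIES, so e.g. n == NS_TST_NUM_ENTRIES/4
   ends up claiming about every fourth free slot. */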
1745 while (r > 0)
1746 {
1747 if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL)
1748 {
1749 card->tste2vc[e] = vc;
1750 ns_write_sram(card, new_tst + e, &data, 1);
1751 cl -= NS_TST_NUM_ENTRIES;
1752 r--;
1753 }
1754
1755 if (++e == NS_TST_NUM_ENTRIES) {
1756 e = 0;
1757 }
1758 cl += n;
1759 }
1760
1761 /* End of fill procedure */
1762
1763 data = ns_tste_make(NS_TST_OPCODE_END, new_tst);
1764 ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1);
1765 ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1);
1766 card->tst_addr = new_tst;
1767 }
1768
1769
1770
1771 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
1772 {
1773 ns_dev *card;
1774 vc_map *vc;
1775 scq_info *scq;
1776 unsigned long buflen;
1777 ns_scqe scqe;
1778 u32 flags; /* TBD flags, not CPU flags */
1779
1780 card = vcc->dev->dev_data;
1781 TXPRINTK("nicstar%d: ns_send() called.\n", card->index);
1782 if ((vc = (vc_map *) vcc->dev_data) == NULL)
1783 {
1784 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
1785 atomic_inc(&vcc->stats->tx_err);
1786 dev_kfree_skb_any(skb);
1787 return -EINVAL;
1788 }
1789
1790 if (!vc->tx)
1791 {
1792 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
1793 atomic_inc(&vcc->stats->tx_err);
1794 dev_kfree_skb_any(skb);
1795 return -EINVAL;
1796 }
1797
1798 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
1799 {
1800 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
1801 atomic_inc(&vcc->stats->tx_err);
1802 dev_kfree_skb_any(skb);
1803 return -EINVAL;
1804 }
1805
1806 if (skb_shinfo(skb)->nr_frags != 0)
1807 {
1808 printk("nicstar%d: No scatter-gather yet.\n", card->index);
1809 atomic_inc(&vcc->stats->tx_err);
1810 dev_kfree_skb_any(skb);
1811 return -EINVAL;
1812 }
1813
1814 ATM_SKB(skb)->vcc = vcc;
1815
1816 if (vcc->qos.aal == ATM_AAL5)
1817 {
1818 buflen = (skb->len + 47 + 8) / 48 * 48; /* SDU plus 8-byte AAL5 trailer, rounded up to a multiple of 48 */
1819 flags = NS_TBD_AAL5;
1820 scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data));
1821 scqe.word_3 = cpu_to_le32((u32) skb->len);
1822 scqe.word_4 = ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0,
1823 ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? 1 : 0);
1824 flags |= NS_TBD_EOPDU;
1825 }
1826 else /* (vcc->qos.aal == ATM_AAL0) */
1827 {
1828 buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */
1829 flags = NS_TBD_AAL0;
1830 scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data) + NS_AAL0_HEADER);
1831 scqe.word_3 = cpu_to_le32(0x00000000);
1832 if (*skb->data & 0x02) /* Payload type 1 - end of pdu */
1833 flags |= NS_TBD_EOPDU;
1834 scqe.word_4 = cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK);
1835 /* Force the VPI/VCI to be the same as in VCC struct */
1836 scqe.word_4 |= cpu_to_le32((((u32) vcc->vpi) << NS_TBD_VPI_SHIFT |
1837 ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) &
1838 NS_TBD_VC_MASK);
1839 }
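/* Example of the AAL5 length computation above (illustrative only): a
   100-byte SDU plus the 8-byte AAL5 trailer occupies 108 bytes, and
   (100 + 47 + 8) / 48 * 48 rounds that up to 144, i.e. three full 48-byte
   cell payloads. */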
1840
1841 if (vcc->qos.txtp.traffic_class == ATM_CBR)
1842 {
1843 scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen);
1844 scq = ((vc_map *) vcc->dev_data)->scq;
1845 }
1846 else
1847 {
1848 scqe.word_1 = ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen);
1849 scq = card->scq0;
1850 }
1851
1852 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
1853 {
1854 atomic_inc(&vcc->stats->tx_err);
1855 dev_kfree_skb_any(skb);
1856 return -EIO;
1857 }
1858 atomic_inc(&vcc->stats->tx);
1859
1860 return 0;
1861 }
1862
1863
1864
1865 static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd,
1866 struct sk_buff *skb)
1867 {
1868 unsigned long flags;
1869 ns_scqe tsr;
1870 u32 scdi, scqi;
1871 int scq_is_vbr;
1872 u32 data;
1873 int index;
1874
1875 ns_grab_scq_lock(card, scq, flags);
1876 while (scq->tail == scq->next)
1877 {
1878 if (in_interrupt()) {
1879 spin_unlock_irqrestore(&scq->lock, flags);
1880 printk("nicstar%d: Error pushing TBD.\n", card->index);
1881 return 1;
1882 }
1883
1884 scq->full = 1;
1885 spin_unlock_irqrestore(&scq->lock, flags);
1886 interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT);
1887 ns_grab_scq_lock(card, scq, flags);
1888
1889 if (scq->full) {
1890 spin_unlock_irqrestore(&scq->lock, flags);
1891 printk("nicstar%d: Timeout pushing TBD.\n", card->index);
1892 return 1;
1893 }
1894 }
1895 *scq->next = *tbd;
1896 index = (int) (scq->next - scq->base);
1897 scq->skb[index] = skb;
1898 XPRINTK("nicstar%d: sending skb at 0x%x (pos %d).\n",
1899 card->index, (u32) skb, index);
1900 XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n",
1901 card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2),
1902 le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4),
1903 (u32) scq->next);
1904 if (scq->next == scq->last)
1905 scq->next = scq->base;
1906 else
1907 scq->next++;
1908
1909 vc->tbd_count++;
1910 if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
1911 {
1912 scq->tbd_count++;
1913 scq_is_vbr = 1;
1914 }
1915 else
1916 scq_is_vbr = 0;
1917
1918 if (vc->tbd_count >= MAX_TBD_PER_VC || scq->tbd_count >= MAX_TBD_PER_SCQ)
1919 {
1920 int has_run = 0;
1921
1922 while (scq->tail == scq->next)
1923 {
1924 if (in_interrupt()) {
1925 data = (u32) virt_to_bus(scq->next);
1926 ns_write_sram(card, scq->scd, &data, 1);
1927 spin_unlock_irqrestore(&scq->lock, flags);
1928 printk("nicstar%d: Error pushing TSR.\n", card->index);
1929 return 0;
1930 }
1931
1932 scq->full = 1;
1933 if (has_run++) break;
1934 spin_unlock_irqrestore(&scq->lock, flags);
1935 interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT);
1936 ns_grab_scq_lock(card, scq, flags);
1937 }
1938
1939 if (!scq->full)
1940 {
1941 tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1942 if (scq_is_vbr)
1943 scdi = NS_TSR_SCDISVBR;
1944 else
1945 scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1946 scqi = scq->next - scq->base;
1947 tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1948 tsr.word_3 = 0x00000000;
1949 tsr.word_4 = 0x00000000;
1950
1951 *scq->next = tsr;
1952 index = (int) scqi;
1953 scq->skb[index] = NULL;
1954 XPRINTK("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n",
1955 card->index, le32_to_cpu(tsr.word_1), le32_to_cpu(tsr.word_2),
1956 le32_to_cpu(tsr.word_3), le32_to_cpu(tsr.word_4),
1957 (u32) scq->next);
1958 if (scq->next == scq->last)
1959 scq->next = scq->base;
1960 else
1961 scq->next++;
1962 vc->tbd_count = 0;
1963 scq->tbd_count = 0;
1964 }
1965 else
1966 PRINTK("nicstar%d: Timeout pushing TSR.\n", card->index);
1967 }
1968 data = (u32) virt_to_bus(scq->next);
1969 ns_write_sram(card, scq->scd, &data, 1);
1970
1971 spin_unlock_irqrestore(&scq->lock, flags);
1972
1973 return 0;
1974 }
1975
1976
1977
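/* process_tsq(): walk the transmit status queue.  Per the 77201 errata up to
 * two empty TSQ entries may precede a valid one, so the scan also looks one
 * and two entries ahead before declaring the queue empty.  For every TSI
 * that is not a timer-rollover entry the corresponding SCQ is looked up from
 * the SCD index and drained up to the reported position; at the end TSQH is
 * updated with the offset of the last entry serviced. */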
1978 static void process_tsq(ns_dev *card)
1979 {
1980 u32 scdi;
1981 scq_info *scq;
1982 ns_tsi *previous = NULL, *one_ahead, *two_ahead;
1983   int serviced_entries; /* flag indicating at least one entry was serviced */
1984
1985 serviced_entries = 0;
1986
1987 if (card->tsq.next == card->tsq.last)
1988 one_ahead = card->tsq.base;
1989 else
1990 one_ahead = card->tsq.next + 1;
1991
1992 if (one_ahead == card->tsq.last)
1993 two_ahead = card->tsq.base;
1994 else
1995 two_ahead = one_ahead + 1;
1996
1997 while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) ||
1998 !ns_tsi_isempty(two_ahead))
1999 /* At most two empty, as stated in the 77201 errata */
2000 {
2001 serviced_entries = 1;
2002
2003 /* Skip the one or two possible empty entries */
2004 while (ns_tsi_isempty(card->tsq.next)) {
2005 if (card->tsq.next == card->tsq.last)
2006 card->tsq.next = card->tsq.base;
2007 else
2008 card->tsq.next++;
2009 }
2010
2011 if (!ns_tsi_tmrof(card->tsq.next))
2012 {
2013 scdi = ns_tsi_getscdindex(card->tsq.next);
2014 if (scdi == NS_TSI_SCDISVBR)
2015 scq = card->scq0;
2016 else
2017 {
2018 if (card->scd2vc[scdi] == NULL)
2019 {
2020 printk("nicstar%d: could not find VC from SCD index.\n",
2021 card->index);
2022 ns_tsi_init(card->tsq.next);
2023 return;
2024 }
2025 scq = card->scd2vc[scdi]->scq;
2026 }
2027 drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next));
2028 scq->full = 0;
2029 wake_up_interruptible(&(scq->scqfull_waitq));
2030 }
2031
2032 ns_tsi_init(card->tsq.next);
2033 previous = card->tsq.next;
2034 if (card->tsq.next == card->tsq.last)
2035 card->tsq.next = card->tsq.base;
2036 else
2037 card->tsq.next++;
2038
2039 if (card->tsq.next == card->tsq.last)
2040 one_ahead = card->tsq.base;
2041 else
2042 one_ahead = card->tsq.next + 1;
2043
2044 if (one_ahead == card->tsq.last)
2045 two_ahead = card->tsq.base;
2046 else
2047 two_ahead = one_ahead + 1;
2048 }
2049
2050 if (serviced_entries) {
2051 writel((((u32) previous) - ((u32) card->tsq.base)),
2052 card->membase + TSQH);
2053 }
2054 }
2055
2056
2057
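/* drain_scq(): release all transmit skbs between the current SCQ tail and
 * 'pos' (the position reported by the card in a TSI), giving each one back
 * through vcc->pop() when available, and advance scq->tail. */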
2058 static void drain_scq(ns_dev *card, scq_info *scq, int pos)
2059 {
2060 struct atm_vcc *vcc;
2061 struct sk_buff *skb;
2062 int i;
2063 unsigned long flags;
2064
2065 XPRINTK("nicstar%d: drain_scq() called, scq at 0x%x, pos %d.\n",
2066 card->index, (u32) scq, pos);
2067 if (pos >= scq->num_entries)
2068 {
2069 printk("nicstar%d: Bad index on drain_scq().\n", card->index);
2070 return;
2071 }
2072
2073 ns_grab_scq_lock(card, scq, flags);
2074 i = (int) (scq->tail - scq->base);
2075 if (++i == scq->num_entries)
2076 i = 0;
2077 while (i != pos)
2078 {
2079 skb = scq->skb[i];
2080 XPRINTK("nicstar%d: freeing skb at 0x%x (index %d).\n",
2081 card->index, (u32) skb, i);
2082 if (skb != NULL)
2083 {
2084 vcc = ATM_SKB(skb)->vcc;
2085 if (vcc->pop != NULL) {
2086 vcc->pop(vcc, skb);
2087 } else {
2088 dev_kfree_skb_irq(skb);
2089 }
2090 scq->skb[i] = NULL;
2091 }
2092 if (++i == scq->num_entries)
2093 i = 0;
2094 }
2095 scq->tail = scq->base + pos;
2096 spin_unlock_irqrestore(&scq->lock, flags);
2097 }
2098
2099
2100
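/* process_rsq(): hand every valid receive status queue entry to dequeue_rx(),
 * reinitialise it, and finally tell the card how far we got by writing RSQH. */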
2101 static void process_rsq(ns_dev *card)
2102 {
2103 ns_rsqe *previous;
2104
2105 if (!ns_rsqe_valid(card->rsq.next))
2106 return;
2107 while (ns_rsqe_valid(card->rsq.next))
2108 {
2109 dequeue_rx(card, card->rsq.next);
2110 ns_rsqe_init(card->rsq.next);
2111 previous = card->rsq.next;
2112 if (card->rsq.next == card->rsq.last)
2113 card->rsq.next = card->rsq.base;
2114 else
2115 card->rsq.next++;
2116 }
2117 writel((((u32) previous) - ((u32) card->rsq.base)),
2118 card->membase + RSQH);
2119 }
2120
2121
2122
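/* dequeue_rx(): handle one receive status queue entry.  For AAL0 each
 * received cell is copied into a freshly allocated small skb, the 4-byte
 * cell header is rebuilt in front of the payload and the skb is pushed to
 * the VCC.  For AAL5 the small/large buffers making up the PDU are collected
 * as iovec entries in a per-VC iovec skb; on end-of-PDU the length from the
 * AAL5 trailer and the CRC indication are checked, and the SDU is delivered
 * either straight from the small buffer, from the large buffer (with the
 * small-buffer head copied in front of it), or reassembled into a huge
 * buffer when more than two buffers are involved. */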
2123 static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2124 {
2125 u32 vpi, vci;
2126 vc_map *vc;
2127 struct sk_buff *iovb;
2128 struct iovec *iov;
2129 struct atm_vcc *vcc;
2130 struct sk_buff *skb;
2131 unsigned short aal5_len;
2132 int len;
2133 u32 stat;
2134
2135 stat = readl(card->membase + STAT);
2136 card->sbfqc = ns_stat_sfbqc_get(stat);
2137 card->lbfqc = ns_stat_lfbqc_get(stat);
2138
2139 skb = (struct sk_buff *) le32_to_cpu(rsqe->buffer_handle);
2140 vpi = ns_rsqe_vpi(rsqe);
2141 vci = ns_rsqe_vci(rsqe);
2142 if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits)
2143 {
2144 printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n",
2145 card->index, vpi, vci);
2146 recycle_rx_buf(card, skb);
2147 return;
2148 }
2149
2150 vc = &(card->vcmap[vpi << card->vcibits | vci]);
2151 if (!vc->rx)
2152 {
2153 RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n",
2154 card->index, vpi, vci);
2155 recycle_rx_buf(card, skb);
2156 return;
2157 }
2158
2159 vcc = vc->rx_vcc;
2160
2161 if (vcc->qos.aal == ATM_AAL0)
2162 {
2163 struct sk_buff *sb;
2164 unsigned char *cell;
2165 int i;
2166
2167 cell = skb->data;
2168 for (i = ns_rsqe_cellcount(rsqe); i; i--)
2169 {
2170 if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL)
2171 {
2172 printk("nicstar%d: Can't allocate buffers for aal0.\n",
2173 card->index);
2174 atomic_add(i,&vcc->stats->rx_drop);
2175 break;
2176 }
2177 if (!atm_charge(vcc, sb->truesize))
2178 {
2179 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
2180 card->index);
2181 atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
2182 dev_kfree_skb_any(sb);
2183 break;
2184 }
2185 /* Rebuild the header */
2186 *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 |
2187 (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000);
2188 if (i == 1 && ns_rsqe_eopdu(rsqe))
2189 *((u32 *) sb->data) |= 0x00000002;
2190 skb_put(sb, NS_AAL0_HEADER);
2191 memcpy(sb->tail, cell, ATM_CELL_PAYLOAD);
2192 skb_put(sb, ATM_CELL_PAYLOAD);
2193 ATM_SKB(sb)->vcc = vcc;
2194 sb->stamp = xtime;
2195 vcc->push(vcc, sb);
2196 atomic_inc(&vcc->stats->rx);
2197 cell += ATM_CELL_PAYLOAD;
2198 }
2199
2200 recycle_rx_buf(card, skb);
2201 return;
2202 }
2203
2204 /* To reach this point, the AAL layer can only be AAL5 */
2205
2206 if ((iovb = vc->rx_iov) == NULL)
2207 {
2208 iovb = skb_dequeue(&(card->iovpool.queue));
2209 if (iovb == NULL) /* No buffers in the queue */
2210 {
2211 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
2212 if (iovb == NULL)
2213 {
2214 printk("nicstar%d: Out of iovec buffers.\n", card->index);
2215 atomic_inc(&vcc->stats->rx_drop);
2216 recycle_rx_buf(card, skb);
2217 return;
2218 }
2219 }
2220 else
2221 if (--card->iovpool.count < card->iovnr.min)
2222 {
2223 struct sk_buff *new_iovb;
2224 if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL)
2225 {
2226 skb_queue_tail(&card->iovpool.queue, new_iovb);
2227 card->iovpool.count++;
2228 }
2229 }
2230 vc->rx_iov = iovb;
2231 NS_SKB(iovb)->iovcnt = 0;
2232 iovb->len = 0;
2233 iovb->tail = iovb->data = iovb->head;
2234 NS_SKB(iovb)->vcc = vcc;
2235 /* IMPORTANT: a pointer to the sk_buff containing the small or large
2236 buffer is stored as iovec base, NOT a pointer to the
2237 small or large buffer itself. */
2238 }
2239 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
2240 {
2241 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
2242 atomic_inc(&vcc->stats->rx_err);
2243 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
2244 NS_SKB(iovb)->iovcnt = 0;
2245 iovb->len = 0;
2246 iovb->tail = iovb->data = iovb->head;
2247 NS_SKB(iovb)->vcc = vcc;
2248 }
2249 iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++];
2250 iov->iov_base = (void *) skb;
2251 iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
2252 iovb->len += iov->iov_len;
2253
2254 if (NS_SKB(iovb)->iovcnt == 1)
2255 {
2256 if (skb->list != &card->sbpool.queue)
2257 {
2258 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
2259 card->index);
2260 which_list(card, skb);
2261 atomic_inc(&vcc->stats->rx_err);
2262 recycle_rx_buf(card, skb);
2263 vc->rx_iov = NULL;
2264 recycle_iov_buf(card, iovb);
2265 return;
2266 }
2267 }
2268 else /* NS_SKB(iovb)->iovcnt >= 2 */
2269 {
2270 if (skb->list != &card->lbpool.queue)
2271 {
2272 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
2273 card->index);
2274 which_list(card, skb);
2275 atomic_inc(&vcc->stats->rx_err);
2276 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
2277 NS_SKB(iovb)->iovcnt);
2278 vc->rx_iov = NULL;
2279 recycle_iov_buf(card, iovb);
2280 return;
2281 }
2282 }
2283
2284 if (ns_rsqe_eopdu(rsqe))
2285 {
2286 /* This works correctly regardless of the endianness of the host */
2287 unsigned char *L1L2 = (unsigned char *)((u32)skb->data +
2288 iov->iov_len - 6);
2289 aal5_len = L1L2[0] << 8 | L1L2[1];
2290 len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
2291 if (ns_rsqe_crcerr(rsqe) ||
2292 len + 8 > iovb->len || len + (47 + 8) < iovb->len)
2293 {
2294 printk("nicstar%d: AAL5 CRC error", card->index);
2295 if (len + 8 > iovb->len || len + (47 + 8) < iovb->len)
2296 printk(" - PDU size mismatch.\n");
2297 else
2298 printk(".\n");
2299 atomic_inc(&vcc->stats->rx_err);
2300 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
2301 NS_SKB(iovb)->iovcnt);
2302 vc->rx_iov = NULL;
2303 recycle_iov_buf(card, iovb);
2304 return;
2305 }
2306
2307 /* By this point we (hopefully) have a complete SDU without errors. */
2308
2309 if (NS_SKB(iovb)->iovcnt == 1) /* Just a small buffer */
2310 {
2311 /* skb points to a small buffer */
2312 if (!atm_charge(vcc, skb->truesize))
2313 {
2314 push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
2315 0, 0);
2316 atomic_inc(&vcc->stats->rx_drop);
2317 }
2318 else
2319 {
2320 skb_put(skb, len);
2321 dequeue_sm_buf(card, skb);
2322 #ifdef NS_USE_DESTRUCTORS
2323 skb->destructor = ns_sb_destructor;
2324 #endif /* NS_USE_DESTRUCTORS */
2325 ATM_SKB(skb)->vcc = vcc;
2326 skb->stamp = xtime;
2327 vcc->push(vcc, skb);
2328 atomic_inc(&vcc->stats->rx);
2329 }
2330 }
2331 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
2332 {
2333 struct sk_buff *sb;
2334
2335 sb = (struct sk_buff *) (iov - 1)->iov_base;
2336 /* skb points to a large buffer */
2337
2338 if (len <= NS_SMBUFSIZE)
2339 {
2340 if (!atm_charge(vcc, sb->truesize))
2341 {
2342 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
2343 0, 0);
2344 atomic_inc(&vcc->stats->rx_drop);
2345 }
2346 else
2347 {
2348 skb_put(sb, len);
2349 dequeue_sm_buf(card, sb);
2350 #ifdef NS_USE_DESTRUCTORS
2351 sb->destructor = ns_sb_destructor;
2352 #endif /* NS_USE_DESTRUCTORS */
2353 ATM_SKB(sb)->vcc = vcc;
2354 sb->stamp = xtime;
2355 vcc->push(vcc, sb);
2356 atomic_inc(&vcc->stats->rx);
2357 }
2358
2359 push_rxbufs(card, BUF_LG, (u32) skb,
2360 (u32) virt_to_bus(skb->data), 0, 0);
2361
2362 }
2363 else /* len > NS_SMBUFSIZE, the usual case */
2364 {
2365 if (!atm_charge(vcc, skb->truesize))
2366 {
2367 push_rxbufs(card, BUF_LG, (u32) skb,
2368 (u32) virt_to_bus(skb->data), 0, 0);
2369 atomic_inc(&vcc->stats->rx_drop);
2370 }
2371 else
2372 {
2373 dequeue_lg_buf(card, skb);
2374 #ifdef NS_USE_DESTRUCTORS
2375 skb->destructor = ns_lb_destructor;
2376 #endif /* NS_USE_DESTRUCTORS */
2377 skb_push(skb, NS_SMBUFSIZE);
2378 memcpy(skb->data, sb->data, NS_SMBUFSIZE);
2379 skb_put(skb, len - NS_SMBUFSIZE);
2380 ATM_SKB(skb)->vcc = vcc;
2381 skb->stamp = xtime;
2382 vcc->push(vcc, skb);
2383 atomic_inc(&vcc->stats->rx);
2384 }
2385
2386 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
2387 0, 0);
2388
2389 }
2390
2391 }
2392 else /* Must push a huge buffer */
2393 {
2394 struct sk_buff *hb, *sb, *lb;
2395 int remaining, tocopy;
2396 int j;
2397
2398 hb = skb_dequeue(&(card->hbpool.queue));
2399 if (hb == NULL) /* No buffers in the queue */
2400 {
2401
2402 hb = dev_alloc_skb(NS_HBUFSIZE);
2403 if (hb == NULL)
2404 {
2405 printk("nicstar%d: Out of huge buffers.\n", card->index);
2406 atomic_inc(&vcc->stats->rx_drop);
2407 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
2408 NS_SKB(iovb)->iovcnt);
2409 vc->rx_iov = NULL;
2410 recycle_iov_buf(card, iovb);
2411 return;
2412 }
2413 else if (card->hbpool.count < card->hbnr.min)
2414 {
2415 struct sk_buff *new_hb;
2416 if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
2417 {
2418 skb_queue_tail(&card->hbpool.queue, new_hb);
2419 card->hbpool.count++;
2420 }
2421 }
2422 }
2423 else
2424 if (--card->hbpool.count < card->hbnr.min)
2425 {
2426 struct sk_buff *new_hb;
2427 if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
2428 {
2429 skb_queue_tail(&card->hbpool.queue, new_hb);
2430 card->hbpool.count++;
2431 }
2432 if (card->hbpool.count < card->hbnr.min)
2433 {
2434 if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
2435 {
2436 skb_queue_tail(&card->hbpool.queue, new_hb);
2437 card->hbpool.count++;
2438 }
2439 }
2440 }
2441
2442 iov = (struct iovec *) iovb->data;
2443
2444 if (!atm_charge(vcc, hb->truesize))
2445 {
2446 recycle_iovec_rx_bufs(card, iov, NS_SKB(iovb)->iovcnt);
2447 if (card->hbpool.count < card->hbnr.max)
2448 {
2449 skb_queue_tail(&card->hbpool.queue, hb);
2450 card->hbpool.count++;
2451 }
2452 else
2453 dev_kfree_skb_any(hb);
2454 atomic_inc(&vcc->stats->rx_drop);
2455 }
2456 else
2457 {
2458 /* Copy the small buffer to the huge buffer */
2459 sb = (struct sk_buff *) iov->iov_base;
2460 memcpy(hb->data, sb->data, iov->iov_len);
2461 skb_put(hb, iov->iov_len);
2462 remaining = len - iov->iov_len;
2463 iov++;
2464 /* Free the small buffer */
2465 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
2466 0, 0);
2467
2468 /* Copy all large buffers to the huge buffer and free them */
2469 for (j = 1; j < NS_SKB(iovb)->iovcnt; j++)
2470 {
2471 lb = (struct sk_buff *) iov->iov_base;
2472 tocopy = min_t(int, remaining, iov->iov_len);
2473 memcpy(hb->tail, lb->data, tocopy);
2474 skb_put(hb, tocopy);
2475 iov++;
2476 remaining -= tocopy;
2477 push_rxbufs(card, BUF_LG, (u32) lb,
2478 (u32) virt_to_bus(lb->data), 0, 0);
2479 }
2480 #ifdef EXTRA_DEBUG
2481 if (remaining != 0 || hb->len != len)
2482 printk("nicstar%d: Huge buffer len mismatch.\n", card->index);
2483 #endif /* EXTRA_DEBUG */
2484 ATM_SKB(hb)->vcc = vcc;
2485 #ifdef NS_USE_DESTRUCTORS
2486 hb->destructor = ns_hb_destructor;
2487 #endif /* NS_USE_DESTRUCTORS */
2488 hb->stamp = xtime;
2489 vcc->push(vcc, hb);
2490 atomic_inc(&vcc->stats->rx);
2491 }
2492 }
2493
2494 vc->rx_iov = NULL;
2495 recycle_iov_buf(card, iovb);
2496 }
2497
2498 }
2499
2500
2501
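/* skb destructors: when NS_USE_DESTRUCTORS is defined, the free buffer pools
 * are also replenished from the skb destructor as soon as the ATM layer
 * releases a buffer, so dequeue_sm_buf()/dequeue_lg_buf() only need to
 * refill below the 'min' level instead of 'init'. */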
2502 #ifdef NS_USE_DESTRUCTORS
2503
2504 static void ns_sb_destructor(struct sk_buff *sb)
2505 {
2506 ns_dev *card;
2507 u32 stat;
2508
2509 card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
2510 stat = readl(card->membase + STAT);
2511 card->sbfqc = ns_stat_sfbqc_get(stat);
2512 card->lbfqc = ns_stat_lfbqc_get(stat);
2513
2514 do
2515 {
2516 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2517 if (sb == NULL)
2518 break;
2519 skb_queue_tail(&card->sbpool.queue, sb);
2520 skb_reserve(sb, NS_AAL0_HEADER);
2521 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
2522 } while (card->sbfqc < card->sbnr.min);
2523 }
2524
2525
2526
2527 static void ns_lb_destructor(struct sk_buff *lb)
2528 {
2529 ns_dev *card;
2530 u32 stat;
2531
2532 card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
2533 stat = readl(card->membase + STAT);
2534 card->sbfqc = ns_stat_sfbqc_get(stat);
2535 card->lbfqc = ns_stat_lfbqc_get(stat);
2536
2537 do
2538 {
2539 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2540 if (lb == NULL)
2541 break;
2542 skb_queue_tail(&card->lbpool.queue, lb);
2543 skb_reserve(lb, NS_SMBUFSIZE);
2544 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
2545 } while (card->lbfqc < card->lbnr.min);
2546 }
2547
2548
2549
2550 static void ns_hb_destructor(struct sk_buff *hb)
2551 {
2552 ns_dev *card;
2553
2554 card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;
2555
2556 while (card->hbpool.count < card->hbnr.init)
2557 {
2558 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2559 if (hb == NULL)
2560 break;
2561 skb_queue_tail(&card->hbpool.queue, hb);
2562 card->hbpool.count++;
2563 }
2564 }
2565
2566 #endif /* NS_USE_DESTRUCTORS */
2567
2568
2569
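/* recycle_rx_buf(): return an unused receive buffer to the card's small or
 * large free buffer queue, depending on which pool list the skb belongs to. */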
2570 static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb)
2571 {
2572 if (skb->list == &card->sbpool.queue)
2573 push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
2574 else if (skb->list == &card->lbpool.queue)
2575 push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
2576 else
2577 {
2578 printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
2579 dev_kfree_skb_any(skb);
2580 }
2581 }
2582
2583
2584
2585 static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count)
2586 {
2587 struct sk_buff *skb;
2588
2589 for (; count > 0; count--)
2590 {
2591 skb = (struct sk_buff *) (iov++)->iov_base;
2592 if (skb->list == &card->sbpool.queue)
2593 push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
2594 0, 0);
2595 else if (skb->list == &card->lbpool.queue)
2596 push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data),
2597 0, 0);
2598 else
2599 {
2600 printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
2601 dev_kfree_skb_any(skb);
2602 }
2603 }
2604 }
2605
2606
2607
2608 static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
2609 {
2610 if (card->iovpool.count < card->iovnr.max)
2611 {
2612 skb_queue_tail(&card->iovpool.queue, iovb);
2613 card->iovpool.count++;
2614 }
2615 else
2616 dev_kfree_skb_any(iovb);
2617 }
2618
2619
2620
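/* dequeue_sm_buf() and dequeue_lg_buf() (below): unlink a buffer that is
 * about to be handed to the ATM layer from its pool list and, if the card's
 * free buffer count has fallen below the refill threshold, allocate
 * replacements and hand them to the card with push_rxbufs(). */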
2621 static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
2622 {
2623 skb_unlink(sb);
2624 #ifdef NS_USE_DESTRUCTORS
2625 if (card->sbfqc < card->sbnr.min)
2626 #else
2627 if (card->sbfqc < card->sbnr.init)
2628 {
2629 struct sk_buff *new_sb;
2630 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
2631 {
2632 skb_queue_tail(&card->sbpool.queue, new_sb);
2633 skb_reserve(new_sb, NS_AAL0_HEADER);
2634 push_rxbufs(card, BUF_SM, (u32) new_sb,
2635 (u32) virt_to_bus(new_sb->data), 0, 0);
2636 }
2637 }
2638 if (card->sbfqc < card->sbnr.init)
2639 #endif /* NS_USE_DESTRUCTORS */
2640 {
2641 struct sk_buff *new_sb;
2642 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
2643 {
2644 skb_queue_tail(&card->sbpool.queue, new_sb);
2645 skb_reserve(new_sb, NS_AAL0_HEADER);
2646 push_rxbufs(card, BUF_SM, (u32) new_sb,
2647 (u32) virt_to_bus(new_sb->data), 0, 0);
2648 }
2649 }
2650 }
2651
2652
2653
2654 static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
2655 {
2656 skb_unlink(lb);
2657 #ifdef NS_USE_DESTRUCTORS
2658 if (card->lbfqc < card->lbnr.min)
2659 #else
2660 if (card->lbfqc < card->lbnr.init)
2661 {
2662 struct sk_buff *new_lb;
2663 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
2664 {
2665 skb_queue_tail(&card->lbpool.queue, new_lb);
2666 skb_reserve(new_lb, NS_SMBUFSIZE);
2667 push_rxbufs(card, BUF_LG, (u32) new_lb,
2668 (u32) virt_to_bus(new_lb->data), 0, 0);
2669 }
2670 }
2671 if (card->lbfqc < card->lbnr.init)
2672 #endif /* NS_USE_DESTRUCTORS */
2673 {
2674 struct sk_buff *new_lb;
2675 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
2676 {
2677 skb_queue_tail(&card->lbpool.queue, new_lb);
2678 skb_reserve(new_lb, NS_SMBUFSIZE);
2679 push_rxbufs(card, BUF_LG, (u32) new_lb,
2680 (u32) virt_to_bus(new_lb->data), 0, 0);
2681 }
2682 }
2683 }
2684
2685
2686
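/* ns_proc_read(): proc entry read callback.  One line per call: the buffer
 * pool table header, the small/large/huge/iovec pool levels, and the
 * interrupt counter (which is reset after being read).  The #if 0 blocks
 * below keep the old 25.6 Mbps PHY register dump and the TST dump around
 * for debugging. */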
2687 static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2688 {
2689 u32 stat;
2690 ns_dev *card;
2691 int left;
2692
2693 left = (int) *pos;
2694 card = (ns_dev *) dev->dev_data;
2695 stat = readl(card->membase + STAT);
2696 if (!left--)
2697 return sprintf(page, "Pool count min init max \n");
2698 if (!left--)
2699 return sprintf(page, "Small %5d %5d %5d %5d \n",
2700 ns_stat_sfbqc_get(stat), card->sbnr.min, card->sbnr.init,
2701 card->sbnr.max);
2702 if (!left--)
2703 return sprintf(page, "Large %5d %5d %5d %5d \n",
2704 ns_stat_lfbqc_get(stat), card->lbnr.min, card->lbnr.init,
2705 card->lbnr.max);
2706 if (!left--)
2707 return sprintf(page, "Huge %5d %5d %5d %5d \n", card->hbpool.count,
2708 card->hbnr.min, card->hbnr.init, card->hbnr.max);
2709 if (!left--)
2710 return sprintf(page, "Iovec %5d %5d %5d %5d \n", card->iovpool.count,
2711 card->iovnr.min, card->iovnr.init, card->iovnr.max);
2712 if (!left--)
2713 {
2714 int retval;
2715 retval = sprintf(page, "Interrupt counter: %u \n", card->intcnt);
2716 card->intcnt = 0;
2717 return retval;
2718 }
2719 #if 0
2720 /* Dump 25.6 Mbps PHY registers */
2721   /* Now that there's a 25.6 Mbps PHY driver, this code isn't needed. I left
2722      it here just in case it's needed for debugging. */
2723 if (card->max_pcr == ATM_25_PCR && !left--)
2724 {
2725 u32 phy_regs[4];
2726 u32 i;
2727
2728 for (i = 0; i < 4; i++)
2729 {
2730 while (CMD_BUSY(card));
2731 writel(NS_CMD_READ_UTILITY | 0x00000200 | i, card->membase + CMD);
2732 while (CMD_BUSY(card));
2733 phy_regs[i] = readl(card->membase + DR0) & 0x000000FF;
2734 }
2735
2736 return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
2737 phy_regs[0], phy_regs[1], phy_regs[2], phy_regs[3]);
2738 }
2739 #endif /* 0 - Dump 25.6 Mbps PHY registers */
2740 #if 0
2741 /* Dump TST */
2742 if (left-- < NS_TST_NUM_ENTRIES)
2743 {
2744 if (card->tste2vc[left + 1] == NULL)
2745 return sprintf(page, "%5d - VBR/UBR \n", left + 1);
2746 else
2747 return sprintf(page, "%5d - %d %d \n", left + 1,
2748 card->tste2vc[left + 1]->tx_vcc->vpi,
2749 card->tste2vc[left + 1]->tx_vcc->vci);
2750 }
2751 #endif /* 0 */
2752 return 0;
2753 }
2754
2755
2756
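/* ns_ioctl(): device-private ioctls.  NS_GETPSTAT copies the current count
 * and min/init/max levels of one buffer pool back to the caller (arg points
 * at a pool_levels structure whose buftype field selects the pool).
 * NS_SETBUFLEV (CAP_NET_ADMIN) installs new min/init/max levels after
 * checking that min < init < max and the limits fit the pool.
 * NS_ADJBUFLEV (CAP_NET_ADMIN, arg is the buffer type cast to a pointer)
 * adjusts the selected pool towards its init level.  Anything else is
 * forwarded to the PHY driver's ioctl handler, if present. */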
2757 static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg)
2758 {
2759 ns_dev *card;
2760 pool_levels pl;
2761 int btype;
2762 unsigned long flags;
2763
2764 card = dev->dev_data;
2765 switch (cmd)
2766 {
2767 case NS_GETPSTAT:
2768 if (get_user(pl.buftype, &((pool_levels *) arg)->buftype))
2769 return -EFAULT;
2770 switch (pl.buftype)
2771 {
2772 case NS_BUFTYPE_SMALL:
2773 pl.count = ns_stat_sfbqc_get(readl(card->membase + STAT));
2774 pl.level.min = card->sbnr.min;
2775 pl.level.init = card->sbnr.init;
2776 pl.level.max = card->sbnr.max;
2777 break;
2778
2779 case NS_BUFTYPE_LARGE:
2780 pl.count = ns_stat_lfbqc_get(readl(card->membase + STAT));
2781 pl.level.min = card->lbnr.min;
2782 pl.level.init = card->lbnr.init;
2783 pl.level.max = card->lbnr.max;
2784 break;
2785
2786 case NS_BUFTYPE_HUGE:
2787 pl.count = card->hbpool.count;
2788 pl.level.min = card->hbnr.min;
2789 pl.level.init = card->hbnr.init;
2790 pl.level.max = card->hbnr.max;
2791 break;
2792
2793 case NS_BUFTYPE_IOVEC:
2794 pl.count = card->iovpool.count;
2795 pl.level.min = card->iovnr.min;
2796 pl.level.init = card->iovnr.init;
2797 pl.level.max = card->iovnr.max;
2798 break;
2799
2800 default:
2801 return -ENOIOCTLCMD;
2802
2803 }
2804 if (!copy_to_user((pool_levels *) arg, &pl, sizeof(pl)))
2805 return (sizeof(pl));
2806 else
2807 return -EFAULT;
2808
2809 case NS_SETBUFLEV:
2810 if (!capable(CAP_NET_ADMIN))
2811 return -EPERM;
2812 if (copy_from_user(&pl, (pool_levels *) arg, sizeof(pl)))
2813 return -EFAULT;
2814 if (pl.level.min >= pl.level.init || pl.level.init >= pl.level.max)
2815 return -EINVAL;
2816 if (pl.level.min == 0)
2817 return -EINVAL;
2818 switch (pl.buftype)
2819 {
2820 case NS_BUFTYPE_SMALL:
2821 if (pl.level.max > TOP_SB)
2822 return -EINVAL;
2823 card->sbnr.min = pl.level.min;
2824 card->sbnr.init = pl.level.init;
2825 card->sbnr.max = pl.level.max;
2826 break;
2827
2828 case NS_BUFTYPE_LARGE:
2829 if (pl.level.max > TOP_LB)
2830 return -EINVAL;
2831 card->lbnr.min = pl.level.min;
2832 card->lbnr.init = pl.level.init;
2833 card->lbnr.max = pl.level.max;
2834 break;
2835
2836 case NS_BUFTYPE_HUGE:
2837 if (pl.level.max > TOP_HB)
2838 return -EINVAL;
2839 card->hbnr.min = pl.level.min;
2840 card->hbnr.init = pl.level.init;
2841 card->hbnr.max = pl.level.max;
2842 break;
2843
2844 case NS_BUFTYPE_IOVEC:
2845 if (pl.level.max > TOP_IOVB)
2846 return -EINVAL;
2847 card->iovnr.min = pl.level.min;
2848 card->iovnr.init = pl.level.init;
2849 card->iovnr.max = pl.level.max;
2850 break;
2851
2852 default:
2853 return -EINVAL;
2854
2855 }
2856 return 0;
2857
2858 case NS_ADJBUFLEV:
2859 if (!capable(CAP_NET_ADMIN))
2860 return -EPERM;
2861         btype = (int) arg; /* an int is the same size as a pointer on this 32-bit-only driver */
2862 switch (btype)
2863 {
2864 case NS_BUFTYPE_SMALL:
2865 while (card->sbfqc < card->sbnr.init)
2866 {
2867 struct sk_buff *sb;
2868
2869 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2870 if (sb == NULL)
2871 return -ENOMEM;
2872 skb_queue_tail(&card->sbpool.queue, sb);
2873 skb_reserve(sb, NS_AAL0_HEADER);
2874 push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
2875 }
2876 break;
2877
2878 case NS_BUFTYPE_LARGE:
2879 while (card->lbfqc < card->lbnr.init)
2880 {
2881 struct sk_buff *lb;
2882
2883 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2884 if (lb == NULL)
2885 return -ENOMEM;
2886 skb_queue_tail(&card->lbpool.queue, lb);
2887 skb_reserve(lb, NS_SMBUFSIZE);
2888 push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
2889 }
2890 break;
2891
2892 case NS_BUFTYPE_HUGE:
2893 while (card->hbpool.count > card->hbnr.init)
2894 {
2895 struct sk_buff *hb;
2896
2897 ns_grab_int_lock(card, flags);
2898 hb = skb_dequeue(&card->hbpool.queue);
2899 card->hbpool.count--;
2900 spin_unlock_irqrestore(&card->int_lock, flags);
2901 if (hb == NULL)
2902 printk("nicstar%d: huge buffer count inconsistent.\n",
2903 card->index);
2904 else
2905 dev_kfree_skb_any(hb);
2906
2907 }
2908 while (card->hbpool.count < card->hbnr.init)
2909 {
2910 struct sk_buff *hb;
2911
2912 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2913 if (hb == NULL)
2914 return -ENOMEM;
2915 ns_grab_int_lock(card, flags);
2916 skb_queue_tail(&card->hbpool.queue, hb);
2917 card->hbpool.count++;
2918 spin_unlock_irqrestore(&card->int_lock, flags);
2919 }
2920 break;
2921
2922 case NS_BUFTYPE_IOVEC:
2923 while (card->iovpool.count > card->iovnr.init)
2924 {
2925 struct sk_buff *iovb;
2926
2927 ns_grab_int_lock(card, flags);
2928 iovb = skb_dequeue(&card->iovpool.queue);
2929 card->iovpool.count--;
2930 spin_unlock_irqrestore(&card->int_lock, flags);
2931 if (iovb == NULL)
2932 printk("nicstar%d: iovec buffer count inconsistent.\n",
2933 card->index);
2934 else
2935 dev_kfree_skb_any(iovb);
2936
2937 }
2938 while (card->iovpool.count < card->iovnr.init)
2939 {
2940 struct sk_buff *iovb;
2941
2942 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
2943 if (iovb == NULL)
2944 return -ENOMEM;
2945 ns_grab_int_lock(card, flags);
2946 skb_queue_tail(&card->iovpool.queue, iovb);
2947 card->iovpool.count++;
2948 spin_unlock_irqrestore(&card->int_lock, flags);
2949 }
2950 break;
2951
2952 default:
2953 return -EINVAL;
2954
2955 }
2956 return 0;
2957
2958 default:
2959 if (dev->phy && dev->phy->ioctl) {
2960 return dev->phy->ioctl(dev, cmd, arg);
2961 }
2962 else {
2963 printk("nicstar%d: %s == NULL \n", card->index,
2964 dev->phy ? "dev->phy->ioctl" : "dev->phy");
2965 return -ENOIOCTLCMD;
2966 }
2967 }
2968 }
2969
2970
2971
2972 static void which_list(ns_dev *card, struct sk_buff *skb)
2973 {
2974 printk("It's a %s buffer.\n", skb->list == &card->sbpool.queue ?
2975 "small" : skb->list == &card->lbpool.queue ? "large" :
2976 skb->list == &card->hbpool.queue ? "huge" :
2977 skb->list == &card->iovpool.queue ? "iovec" : "unknown");
2978 }
2979
2980
2981
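/* ns_poll(): periodic safety-net timer.  For every card that is not
 * currently inside its interrupt handler it processes the TSQ and RSQ and
 * acknowledges the TSIF/EOPDU status bits, in case an interrupt was missed,
 * then re-arms itself NS_POLL_PERIOD jiffies later. */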
2982 static void ns_poll(unsigned long arg)
2983 {
2984 int i;
2985 ns_dev *card;
2986 unsigned long flags;
2987 u32 stat_r, stat_w;
2988
2989 PRINTK("nicstar: Entering ns_poll().\n");
2990 for (i = 0; i < num_cards; i++)
2991 {
2992 card = cards[i];
2993 if (spin_is_locked(&card->int_lock)) {
2994 /* Probably it isn't worth spinning */
2995 continue;
2996 }
2997 ns_grab_int_lock(card, flags);
2998
2999 stat_w = 0;
3000 stat_r = readl(card->membase + STAT);
3001 if (stat_r & NS_STAT_TSIF)
3002 stat_w |= NS_STAT_TSIF;
3003 if (stat_r & NS_STAT_EOPDU)
3004 stat_w |= NS_STAT_EOPDU;
3005
3006 process_tsq(card);
3007 process_rsq(card);
3008
3009 writel(stat_w, card->membase + STAT);
3010 spin_unlock_irqrestore(&card->int_lock, flags);
3011 }
3012 mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD);
3013 PRINTK("nicstar: Leaving ns_poll().\n");
3014 }
3015
3016
3017
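/* ns_parse_mac(): parse a MAC/ESI given as six colon-separated hex octets
 * (e.g. "00:20:2a:00:12:34") into the 6-byte esi[] array; returns -1 on any
 * formatting error. */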
3018 static int ns_parse_mac(char *mac, unsigned char *esi)
3019 {
3020 int i, j;
3021 short byte1, byte0;
3022
3023 if (mac == NULL || esi == NULL)
3024 return -1;
3025 j = 0;
3026 for (i = 0; i < 6; i++)
3027 {
3028 if ((byte1 = ns_h2i(mac[j++])) < 0)
3029 return -1;
3030 if ((byte0 = ns_h2i(mac[j++])) < 0)
3031 return -1;
3032 esi[i] = (unsigned char) (byte1 * 16 + byte0);
3033 if (i < 5)
3034 {
3035 if (mac[j++] != ':')
3036 return -1;
3037 }
3038 }
3039 return 0;
3040 }
3041
3042
3043
3044 static short ns_h2i(char c)
3045 {
3046 if (c >= '0' && c <= '9')
3047 return (short) (c - '0');
3048 if (c >= 'A' && c <= 'F')
3049 return (short) (c - 'A' + 10);
3050 if (c >= 'a' && c <= 'f')
3051 return (short) (c - 'a' + 10);
3052 return -1;
3053 }
3054
3055
3056
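/* ns_phy_put()/ns_phy_get(): access a PHY register through the card's
 * utility bus.  Both take the per-card resource lock, wait for the command
 * register to go idle, and then issue a WRITE_UTILITY/READ_UTILITY command
 * with the register address in the low byte; reads return the low byte of
 * DR0. */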
3057 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
3058 unsigned long addr)
3059 {
3060 ns_dev *card;
3061 unsigned long flags;
3062
3063 card = dev->dev_data;
3064 ns_grab_res_lock(card, flags);
3065 while(CMD_BUSY(card));
3066 writel((unsigned long) value, card->membase + DR0);
3067 writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF),
3068 card->membase + CMD);
3069 spin_unlock_irqrestore(&card->res_lock, flags);
3070 }
3071
3072
3073
3074 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr)
3075 {
3076 ns_dev *card;
3077 unsigned long flags;
3078 unsigned long data;
3079
3080 card = dev->dev_data;
3081 ns_grab_res_lock(card, flags);
3082 while(CMD_BUSY(card));
3083 writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF),
3084 card->membase + CMD);
3085 while(CMD_BUSY(card));
3086 data = readl(card->membase + DR0) & 0x000000FF;
3087 spin_unlock_irqrestore(&card->res_lock, flags);
3088 return (unsigned char) data;
3089 }
3090
3091
3092
3093 module_init(nicstar_init);
3094 module_exit(nicstar_cleanup);
3095