1 /*
2 * $Id: dmascc.c,v 1.27 2000/06/01 14:46:23 oe1kib Exp $
3 *
4 * Driver for high-speed SCC boards (those with DMA support)
5 * Copyright (C) 1997-2000 Klaus Kudielka
6 *
7 * S5SCC/DMA support by Janko Koleznik S52HI
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 */
24
25
26 #include <linux/module.h>
27 #include <linux/delay.h>
28 #include <linux/errno.h>
29 #include <linux/if_arp.h>
30 #include <linux/in.h>
31 #include <linux/init.h>
32 #include <linux/interrupt.h>
33 #include <linux/ioport.h>
34 #include <linux/kernel.h>
35 #include <linux/mm.h>
36 #include <linux/netdevice.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/sockios.h>
39 #include <linux/tqueue.h>
40 #include <linux/version.h>
41 #include <asm/atomic.h>
42 #include <asm/bitops.h>
43 #include <asm/dma.h>
44 #include <asm/io.h>
45 #include <asm/irq.h>
46 #include <asm/segment.h>
47 #include <asm/uaccess.h>
48 #include <net/ax25.h>
49 #include "z8530.h"
50
51
52 /* Linux 2.2 and 2.3 compatibility */
53
54 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,14)
55 #define net_device device
56 #endif
57 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,43)
58 #define netif_start_queue(dev) { dev->tbusy = 0; }
59 #define netif_stop_queue(dev) { dev->tbusy = 1; }
60 #define netif_wake_queue(dev) { dev->tbusy = 0; mark_bh(NET_BH); }
61 #endif
62 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,47)
63 #define netif_running(dev) (dev->flags & IFF_UP)
64 #endif
65
66
/* Number of buffers per channel */

#define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
#define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
#define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */


/* Cards supported */

/* Initializer order follows struct scc_hardware: name, io_region,
   io_delta, io_size, num_devs, scc_offset, tmr_offset, tmr_hz, pclk_hz */
#define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
 0, 8, 1843200, 3686400 }
#define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
 0, 8, 3686400, 7372800 }
#define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
 0, 4, 6144000, 6144000 }
#define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
 0, 8, 4915200, 9830400 }

#define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }

#define TMR_0_HZ 25600 /* Frequency of timer 0 */

/* Indices into hw[]; must stay in the same order as HARDWARE above */
#define TYPE_PI 0
#define TYPE_PI2 1
#define TYPE_TWIN 2
#define TYPE_S5 3
#define NUM_TYPES 4

#define MAX_NUM_DEVS 32


/* SCC chips supported */

#define Z8530 0
#define Z85C30 1
#define Z85230 2

#define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }


/* I/O registers */

/* 8530 registers relative to card base */
#define SCCB_CMD 0x00
#define SCCB_DATA 0x01
#define SCCA_CMD 0x02
#define SCCA_DATA 0x03

/* 8253/8254 registers relative to card base */
#define TMR_CNT0 0x00
#define TMR_CNT1 0x01
#define TMR_CNT2 0x02
#define TMR_CTRL 0x03

/* Additional PI/PI2 registers relative to card base */
#define PI_DREQ_MASK 0x04

/* Additional PackeTwin registers relative to card base.
   NOTE(review): addresses 0x08-0x0b appear twice below — presumably the
   first group are read registers and the second write registers; confirm
   against the PackeTwin hardware documentation. */
#define TWIN_INT_REG 0x08
#define TWIN_CLR_TMR1 0x09
#define TWIN_CLR_TMR2 0x0a
#define TWIN_SPARE_1 0x0b
#define TWIN_DMA_CFG 0x08
#define TWIN_SERIAL_CFG 0x09
#define TWIN_DMA_CLR_FF 0x0a
#define TWIN_SPARE_2 0x0b


/* PackeTwin I/O register values */

/* INT_REG */
#define TWIN_SCC_MSK 0x01
#define TWIN_TMR1_MSK 0x02
#define TWIN_TMR2_MSK 0x04
#define TWIN_INT_MSK 0x07

/* SERIAL_CFG */
#define TWIN_DTRA_ON 0x01
#define TWIN_DTRB_ON 0x02
#define TWIN_EXTCLKA 0x04
#define TWIN_EXTCLKB 0x08
#define TWIN_LOOPA_ON 0x10
#define TWIN_LOOPB_ON 0x20
#define TWIN_EI 0x80

/* DMA_CFG */
#define TWIN_DMA_HDX_T1 0x08
#define TWIN_DMA_HDX_R1 0x0a
#define TWIN_DMA_HDX_T3 0x14
#define TWIN_DMA_HDX_R3 0x16
#define TWIN_DMA_FDX_T3R1 0x1b
#define TWIN_DMA_FDX_T1R3 0x1d


/* Status values */

/* States of the per-channel TX/RX state machine (priv->state) */
#define IDLE 0
#define TX_HEAD 1
#define TX_DATA 2
#define TX_PAUSE 3
#define TX_TAIL 4
#define RTS_OFF 5
#define WAIT 6
#define DCD_ON 7
#define RX_ON 8
#define DCD_OFF 9


/* Ioctls */

#define SIOCGSCCPARAM SIOCDEVPRIVATE
#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
179
180
181 /* Data types */
182
/* Per-interface configuration, read/written by user space via the
   SIOCGSCCPARAM/SIOCSSCCPARAM ioctls (see scc_ioctl). */
struct scc_param {
  int pclk_hz; /* frequency of BRG input (don't change) */
  int brg_tc; /* BRG terminal count; BRG disabled if < 0 */
  int nrzi; /* 0 (nrz), 1 (nrzi) */
  int clocks; /* see dmascc_cfg documentation */
  int txdelay; /* [1/TMR_0_HZ] */
  int txtimeout; /* [1/HZ] */
  int txtail; /* [1/TMR_0_HZ] */
  int waittime; /* [1/TMR_0_HZ] */
  int slottime; /* [1/TMR_0_HZ] */
  int persist; /* 1 ... 256 */
  int dma; /* -1 (disable), 0, 1, 3 */
  int txpause; /* [1/TMR_0_HZ] */
  int rtsoff; /* [1/TMR_0_HZ] */
  int dcdon; /* [1/TMR_0_HZ] */
  int dcdoff; /* [1/TMR_0_HZ] */
};

/* Static description of one supported card type; instances come from the
   HARDWARE initializer into hw[]. */
struct scc_hardware {
  char *name;       /* printable card name */
  int io_region;    /* first candidate I/O base address */
  int io_delta;     /* spacing between candidate base addresses */
  int io_size;      /* size of the card's I/O region */
  int num_devs;     /* number of candidate addresses to probe */
  int scc_offset;   /* SCC registers relative to card base */
  int tmr_offset;   /* 8253/8254 timer registers relative to card base */
  int tmr_hz;       /* timer input clock frequency */
  int pclk_hz;      /* SCC PCLK frequency (BRG input) */
};

/* Per-channel driver state (two per card) */
struct scc_priv {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
  char name[IFNAMSIZ];  /* backing storage for dev->name on old kernels */
#endif
  int type;                           /* card type (TYPE_*) */
  int chip;                           /* chip variant (Z8530/Z85C30/Z85230) */
  struct net_device *dev;
  struct scc_info *info;              /* back pointer to the owning card */
  struct net_device_stats stats;
  int channel;                        /* 0 = channel A, 1 = channel B */
  int card_base, scc_cmd, scc_data;   /* I/O port addresses */
  int tmr_cnt, tmr_ctrl, tmr_mode;    /* timer ports and mode byte */
  struct scc_param param;
  char rx_buf[NUM_RX_BUF][BUF_SIZE];  /* RX ring of DMA-capable buffers */
  int rx_len[NUM_RX_BUF];             /* length of each queued RX frame */
  int rx_ptr;                         /* write offset in current RX buffer */
  struct tq_struct rx_task;           /* bottom half delivering RX frames */
  int rx_head, rx_tail, rx_count;     /* RX ring indices/occupancy */
  int rx_over;                        /* 0 ok, 1 FIFO overrun, 2 frame too long */
  char tx_buf[NUM_TX_BUF][BUF_SIZE];  /* TX ring of DMA-capable buffers */
  int tx_len[NUM_TX_BUF];
  int tx_ptr;                         /* read offset in current TX buffer */
  int tx_head, tx_tail, tx_count;     /* TX ring indices/occupancy */
  int state;                          /* TX/RX state machine state (IDLE, ...) */
  unsigned long tx_start;             /* jiffies when current TX started */
  int rr0;                            /* last value read from SCC RR0 */
};

/* Per-card state: one SCC chip serving two channels/devices */
struct scc_info {
  int irq_used;           /* number of open channels sharing the IRQ */
  int twin_serial_cfg;    /* shadow of the PackeTwin SERIAL_CFG register */
  struct net_device dev[2];
  struct scc_priv priv[2];
  struct scc_info *next;  /* singly-linked list of all detected cards */
};
248
249
/* Function declarations */

int dmascc_init(void) __init;
static int setup_adapter(int card_base, int type, int n) __init;

/* Low-level SCC register access; each handles the per-card quirks
   (DMA request masking on PI/PI2, I/O recovery delays) internally */
static void write_scc(struct scc_priv *priv, int reg, int val);
static void write_scc_data(struct scc_priv *priv, int val, int fast);
static int read_scc(struct scc_priv *priv, int reg);
static int read_scc_data(struct scc_priv *priv);

/* net_device methods */
static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *scc_get_stats(struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);

/* Interrupt handling and bottom half */
static void scc_isr(int irq, void *dev_id, struct pt_regs * regs);
static inline void z8530_isr(struct scc_info *info);
static void rx_isr(struct scc_priv *priv);
static void special_condition(struct scc_priv *priv, int rc);
static void rx_bh(void *arg);
static void tx_isr(struct scc_priv *priv);
static void es_isr(struct scc_priv *priv);
static void tm_isr(struct scc_priv *priv);

/* Channel helpers */
static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);


/* Initialization variables */

/* I/O base addresses supplied via module/boot parameters; io[0] == 0
   selects autoprobing (see dmascc_init) */
static int io[MAX_NUM_DEVS] __initdata = { 0, };
/* Beware! hw[] is also used in cleanup_module(). If __initdata also applies
   to modules, we may not declare hw[] as __initdata */
static struct scc_hardware hw[NUM_TYPES] __initdata = HARDWARE;
/* Default addresses, shifted left one bit (AX.25 address encoding) */
static char ax25_broadcast[7] __initdata =
  { 'Q'<<1, 'S'<<1, 'T'<<1, ' '<<1, ' '<<1, ' '<<1, '0'<<1 };
static char ax25_test[7] __initdata =
  { 'L'<<1, 'I'<<1, 'N'<<1, 'U'<<1, 'X'<<1, ' '<<1, '1'<<1 };
299
300
/* Module functions */

#ifdef MODULE


MODULE_AUTHOR("Klaus Kudielka");
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NUM_DEVS) "i");
MODULE_LICENSE("GPL");


/* Module entry point: probe for all adapters and register devices. */
int init_module(void) {
  return dmascc_init();
}
315
316
cleanup_module(void)317 void cleanup_module(void) {
318 int i;
319 struct scc_info *info;
320
321 while (first) {
322 info = first;
323
324 /* Unregister devices */
325 for (i = 0; i < 2; i++) {
326 if (info->dev[i].name)
327 rtnl_lock();
328 unregister_netdevice(&info->dev[i]);
329 rtnl_unlock();
330 }
331
332 /* Reset board */
333 if (info->priv[0].type == TYPE_TWIN)
334 outb(0, info->dev[0].base_addr + TWIN_SERIAL_CFG);
335 write_scc(&info->priv[0], R9, FHWRES);
336 release_region(info->dev[0].base_addr,
337 hw[info->priv[0].type].io_size);
338
339 /* Free memory */
340 first = info->next;
341 kfree(info);
342 }
343 }
344
345
346 #else
347
348
dmascc_setup(char * str,int * ints)349 void __init dmascc_setup(char *str, int *ints) {
350 int i;
351
352 for (i = 0; i < MAX_NUM_DEVS && i < ints[0]; i++)
353 io[i] = ints[i+1];
354 }
355
356
357 #endif
358
random(void)359 static inline unsigned char random(void) {
360 /* See "Numerical Recipes in C", second edition, p. 284 */
361 rand = rand * 1664525L + 1013904223L;
362 return (unsigned char) (rand >> 24);
363 }
364
365
366 /* Initialization functions */
367
/* Probe for all supported card types, either at the user-supplied I/O
   addresses (io[]) or at each type's default address range, and set up
   every adapter found. Detection works by programming the on-board
   8253/8254: timer 1 is loaded so that it should expire after ~10
   jiffies; an address where that measurement comes out at 9..11 jiffies
   is assumed to host a real card.
   Returns 0 if at least one adapter was initialized, -EIO otherwise. */
int __init dmascc_init(void) {
  int h, i, j, n;
  int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
    t1[MAX_NUM_DEVS];
  unsigned t_val;
  unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
    counting[MAX_NUM_DEVS];

  /* Initialize random number generator */
  rand = jiffies;
  /* Cards found = 0 */
  n = 0;
  /* Warning message */
  if (!io[0]) printk("dmascc: autoprobing (dangerous)\n");

  /* Run autodetection for each card type */
  for (h = 0; h < NUM_TYPES; h++) {

    if (io[0]) {
      /* User-specified I/O address regions: accept only addresses that
         fall exactly on this type's address grid */
      for (i = 0; i < hw[h].num_devs; i++) base[i] = 0;
      for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
        j = (io[i] - hw[h].io_region) / hw[h].io_delta;
        if (j >= 0 &&
            j < hw[h].num_devs &&
            hw[h].io_region + j * hw[h].io_delta == io[i]) {
          base[j] = io[i];
        }
      }
    } else {
      /* Default I/O address regions */
      for (i = 0; i < hw[h].num_devs; i++) {
        base[i] = hw[h].io_region + i * hw[h].io_delta;
      }
    }

    /* Check valid I/O address regions; compute timer port addresses */
    for (i = 0; i < hw[h].num_devs; i++)
      if (base[i]) {
        if (check_region(base[i], hw[h].io_size))
          base[i] = 0;
        else {
          tcmd[i] = base[i] + hw[h].tmr_offset + TMR_CTRL;
          t0[i] = base[i] + hw[h].tmr_offset + TMR_CNT0;
          t1[i] = base[i] + hw[h].tmr_offset + TMR_CNT1;
        }
      }

    /* Start timers */
    for (i = 0; i < hw[h].num_devs; i++)
      if (base[i]) {
        /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
        outb(0x36, tcmd[i]);
        outb((hw[h].tmr_hz/TMR_0_HZ) & 0xFF, t0[i]);
        outb((hw[h].tmr_hz/TMR_0_HZ) >> 8, t0[i]);
        /* Timer 1: LSB+MSB, Mode 0, HZ/10 */
        outb(0x70, tcmd[i]);
        outb((TMR_0_HZ/HZ*10) & 0xFF, t1[i]);
        outb((TMR_0_HZ/HZ*10) >> 8, t1[i]);
        start[i] = jiffies;
        delay[i] = 0;
        counting[i] = 1;
        /* Timer 2: LSB+MSB, Mode 0 */
        outb(0xb0, tcmd[i]);
      }
    time = jiffies;
    /* Wait until counter registers are loaded */
    udelay(2000000/TMR_0_HZ);

    /* Timing loop: watch each candidate's timer 1 count down */
    while (jiffies - time < 13) {
      for (i = 0; i < hw[h].num_devs; i++)
        if (base[i] && counting[i]) {
          /* Read back Timer 1: latch; read LSB; read MSB */
          outb(0x40, tcmd[i]);
          t_val = inb(t1[i]) + (inb(t1[i]) << 8);
          /* Also check whether counter did wrap */
          if (t_val == 0 || t_val > TMR_0_HZ/HZ*10) counting[i] = 0;
          delay[i] = jiffies - start[i];
        }
    }

    /* Evaluate measurements: a real timer expires after ~10 jiffies */
    for (i = 0; i < hw[h].num_devs; i++)
      if (base[i]) {
        if (delay[i] >= 9 && delay[i] <= 11) {
          /* Ok, we have found an adapter */
          if (setup_adapter(base[i], h, n) == 0)
            n++;
        }
      }

  } /* NUM_TYPES */

  /* If any adapter was successfully initialized, return ok */
  if (n) return 0;

  /* If no adapter found, return error */
  printk("dmascc: no adapters found\n");
  return -EIO;
}
469
470
/* Initialize one adapter found at card_base: identify the SCC chip
   variant, auto-detect the IRQ by firing timer 1 once, fill in both
   channels' data structures, and register the two network devices.
   n is the adapter index (device names become dmascc<2n>/<2n+1>).
   Returns 0 on success, -1 on failure. */
int __init setup_adapter(int card_base, int type, int n) {
  int i, irq, chip;
  struct scc_info *info;
  struct net_device *dev;
  struct scc_priv *priv;
  unsigned long time;
  unsigned int irqs;
  int tmr_base = card_base + hw[type].tmr_offset;
  int scc_base = card_base + hw[type].scc_offset;
  char *chipnames[] = CHIPNAMES;

  /* Allocate memory (GFP_DMA: the RX/TX buffers inside are used for ISA DMA) */
  info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
  if (!info) {
    printk("dmascc: could not allocate memory for %s at %#3x\n",
           hw[type].name, card_base);
    return -1;
  }

  /* Initialize what is necessary for write_scc and write_scc_data */
  memset(info, 0, sizeof(struct scc_info));
  priv = &info->priv[0];
  priv->type = type;
  priv->card_base = card_base;
  priv->scc_cmd = scc_base + SCCA_CMD;
  priv->scc_data = scc_base + SCCA_DATA;

  /* Reset SCC */
  write_scc(priv, R9, FHWRES | MIE | NV);

  /* Determine type of chip by enabling SDLC/HDLC enhancements */
  write_scc(priv, R15, SHDLCE);
  if (!read_scc(priv, R15)) {
    /* WR7' not present. This is an ordinary Z8530 SCC. */
    chip = Z8530;
  } else {
    /* Put one character in TX FIFO */
    write_scc_data(priv, 0, 0);
    if (read_scc(priv, R0) & Tx_BUF_EMP) {
      /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
      chip = Z85230;
    } else {
      /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
      chip = Z85C30;
    }
  }
  write_scc(priv, R15, 0);

  /* Start IRQ auto-detection */
  sti();
  irqs = probe_irq_on();

  /* Enable interrupts */
  if (type == TYPE_TWIN) {
    outb(0, card_base + TWIN_DMA_CFG);
    inb(card_base + TWIN_CLR_TMR1);
    inb(card_base + TWIN_CLR_TMR2);
    outb((info->twin_serial_cfg = TWIN_EI), card_base + TWIN_SERIAL_CFG);
  } else {
    write_scc(priv, R15, CTSIE);
    write_scc(priv, R0, RES_EXT_INT);
    write_scc(priv, R1, EXT_INT_ENAB);
  }

  /* Start timer: load terminal count 1 (LSB, then MSB of 0) */
  outb(1, tmr_base + TMR_CNT1);
  outb(0, tmr_base + TMR_CNT1);

  /* Wait and detect IRQ (busy-wait; we are still in __init context) */
  time = jiffies; while (jiffies - time < 2 + HZ / TMR_0_HZ);
  irq = probe_irq_off(irqs);

  /* Clear pending interrupt, disable interrupts */
  if (type == TYPE_TWIN) {
    inb(card_base + TWIN_CLR_TMR1);
  } else {
    write_scc(priv, R1, 0);
    write_scc(priv, R15, 0);
    write_scc(priv, R0, RES_EXT_INT);
  }

  if (irq <= 0) {
    printk("dmascc: could not find irq of %s at %#3x (irq=%d)\n",
           hw[type].name, card_base, irq);
    kfree(info);
    return -1;
  }

  /* Set up data structures for both channels (0 = A, 1 = B) */
  for (i = 0; i < 2; i++) {
    dev = &info->dev[i];
    priv = &info->priv[i];
    priv->type = type;
    priv->chip = chip;
    priv->dev = dev;
    priv->info = info;
    priv->channel = i;
    priv->card_base = card_base;
    priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
    priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
    priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
    priv->tmr_ctrl = tmr_base + TMR_CTRL;
    priv->tmr_mode = i ? 0xb0 : 0x70;
    priv->param.pclk_hz = hw[type].pclk_hz;
    priv->param.brg_tc = -1;        /* BRG disabled by default */
    priv->param.clocks = TCTRxCP | RCRTxCP;
    priv->param.persist = 256;
    priv->param.dma = -1;           /* DMA disabled by default */
    priv->rx_task.routine = rx_bh;
    priv->rx_task.data = priv;
    dev->priv = priv;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
    /* On 2.2, dev->name is a pointer; point it at our own storage */
    if (sizeof(dev->name) == sizeof(char *)) dev->name = priv->name;
#endif
    sprintf(dev->name, "dmascc%i", 2*n+i);
    dev->base_addr = card_base;
    dev->irq = irq;
    dev->open = scc_open;
    dev->stop = scc_close;
    dev->do_ioctl = scc_ioctl;
    dev->hard_start_xmit = scc_send_packet;
    dev->get_stats = scc_get_stats;
    dev->hard_header = ax25_encapsulate;
    dev->rebuild_header = ax25_rebuild_header;
    dev->set_mac_address = scc_set_mac_address;
    dev->type = ARPHRD_AX25;
    dev->hard_header_len = 73;
    dev->mtu = 1500;
    dev->addr_len = 7;
    dev->tx_queue_len = 64;
    memcpy(dev->broadcast, ax25_broadcast, 7);
    memcpy(dev->dev_addr, ax25_test, 7);
    rtnl_lock();
    if (register_netdevice(dev)) {
      printk("dmascc: could not register %s\n", dev->name);
    }
    rtnl_unlock();
  }

  request_region(card_base, hw[type].io_size, "dmascc");

  /* Link the new card into the global list */
  info->next = first;
  first = info;
  printk("dmascc: found %s (%s) at %#3x, irq %d\n", hw[type].name,
         chipnames[chip], card_base, irq);
  return 0;
}
618
619
620 /* Driver functions */
621
/* Write val to SCC register reg (reg 0 writes the command port directly).
   On PI/PI2 cards the PI_DREQ_MASK port is cleared around the access,
   with interrupts off — presumably to gate the card's DMA request while
   the SCC is being touched (TODO confirm against PI hardware docs). */
static void write_scc(struct scc_priv *priv, int reg, int val) {
  unsigned long flags;
  switch (priv->type) {
  case TYPE_S5:
    /* S5SCC/DMA: plain outb, no I/O recovery delay needed */
    if (reg) outb(reg, priv->scc_cmd);
    outb(val, priv->scc_cmd);
    return;
  case TYPE_TWIN:
    /* PackeTwin: use the slow outb_p variants */
    if (reg) outb_p(reg, priv->scc_cmd);
    outb_p(val, priv->scc_cmd);
    return;
  default:
    /* PI/PI2 */
    save_flags(flags);
    cli();
    outb_p(0, priv->card_base + PI_DREQ_MASK);
    if (reg) outb_p(reg, priv->scc_cmd);
    outb_p(val, priv->scc_cmd);
    outb(1, priv->card_base + PI_DREQ_MASK);
    restore_flags(flags);
    return;
  }
}
644
645
/* Write one data byte to the SCC data port. On PI/PI2, fast != 0 skips
   the DMA-request masking dance (caller guarantees it is safe). */
static void write_scc_data(struct scc_priv *priv, int val, int fast) {
  unsigned long flags;
  switch (priv->type) {
  case TYPE_S5:
    outb(val, priv->scc_data);
    return;
  case TYPE_TWIN:
    outb_p(val, priv->scc_data);
    return;
  default:
    /* PI/PI2: mask DMA request around the access unless 'fast' */
    if (fast) outb_p(val, priv->scc_data);
    else {
      save_flags(flags);
      cli();
      outb_p(0, priv->card_base + PI_DREQ_MASK);
      outb_p(val, priv->scc_data);
      outb(1, priv->card_base + PI_DREQ_MASK);
      restore_flags(flags);
    }
    return;
  }
}
668
669
/* Read SCC register reg (reg 0 reads the command port directly).
   Mirrors write_scc's per-card access sequences. */
static int read_scc(struct scc_priv *priv, int reg) {
  int rc;
  unsigned long flags;
  switch (priv->type) {
  case TYPE_S5:
    if (reg) outb(reg, priv->scc_cmd);
    return inb(priv->scc_cmd);
  case TYPE_TWIN:
    if (reg) outb_p(reg, priv->scc_cmd);
    return inb_p(priv->scc_cmd);
  default:
    /* PI/PI2: mask DMA request around the access */
    save_flags(flags);
    cli();
    outb_p(0, priv->card_base + PI_DREQ_MASK);
    if (reg) outb_p(reg, priv->scc_cmd);
    rc = inb_p(priv->scc_cmd);
    outb(1, priv->card_base + PI_DREQ_MASK);
    restore_flags(flags);
    return rc;
  }
}
691
692
/* Read one data byte from the SCC data port, with the same per-card
   access sequence as read_scc. */
static int read_scc_data(struct scc_priv *priv) {
  int rc;
  unsigned long flags;
  switch (priv->type) {
  case TYPE_S5:
    return inb(priv->scc_data);
  case TYPE_TWIN:
    return inb_p(priv->scc_data);
  default:
    /* PI/PI2: mask DMA request around the access */
    save_flags(flags);
    cli();
    outb_p(0, priv->card_base + PI_DREQ_MASK);
    rc = inb_p(priv->scc_data);
    outb(1, priv->card_base + PI_DREQ_MASK);
    restore_flags(flags);
    return rc;
  }
}
711
712
/* net_device open: acquire the (card-shared) IRQ and optional DMA
   channel, reset the driver's ring state, and program the SCC channel
   for SDLC operation according to priv->param.
   Returns 0 on success, -EAGAIN if IRQ or DMA cannot be obtained. */
static int scc_open(struct net_device *dev) {
  struct scc_priv *priv = dev->priv;
  struct scc_info *info = priv->info;
  int card_base = priv->card_base;

  MOD_INC_USE_COUNT;

  /* Request IRQ if not already used by other channel */
  if (!info->irq_used) {
    if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
      MOD_DEC_USE_COUNT;
      return -EAGAIN;
    }
  }
  info->irq_used++;

  /* Request DMA if required */
  if (priv->param.dma >= 0) {
    if (request_dma(priv->param.dma, "dmascc")) {
      /* Roll back the IRQ refcount taken above */
      if (--info->irq_used == 0) free_irq(dev->irq, info);
      MOD_DEC_USE_COUNT;
      return -EAGAIN;
    } else {
      unsigned long flags = claim_dma_lock();
      clear_dma_ff(priv->param.dma);
      release_dma_lock(flags);
    }
  }

  /* Initialize local variables */
  priv->rx_ptr = 0;
  priv->rx_over = 0;
  priv->rx_head = priv->rx_tail = priv->rx_count = 0;
  priv->state = IDLE;
  priv->tx_head = priv->tx_tail = priv->tx_count = 0;
  priv->tx_ptr = 0;

  /* Reset channel */
  write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
  /* X1 clock, SDLC mode */
  write_scc(priv, R4, SDLC | X1CLK);
  /* DMA */
  write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
  /* 8 bit RX char, RX disable */
  write_scc(priv, R3, Rx8);
  /* 8 bit TX char, TX disable */
  write_scc(priv, R5, Tx8);
  /* SDLC address field */
  write_scc(priv, R6, 0);
  /* SDLC flag */
  write_scc(priv, R7, FLAG);
  switch (priv->chip) {
  case Z85C30:
    /* Select WR7' */
    write_scc(priv, R15, SHDLCE);
    /* Auto EOM reset */
    write_scc(priv, R7, AUTOEOM);
    write_scc(priv, R15, 0);
    break;
  case Z85230:
    /* Select WR7' */
    write_scc(priv, R15, SHDLCE);
    /* The following bits are set (see 2.5.2.1):
       - Automatic EOM reset
       - Interrupt request if RX FIFO is half full
         This bit should be ignored in DMA mode (according to the
         documentation), but actually isn't. The receiver doesn't work if
         it is set. Thus, we have to clear it in DMA mode.
       - Interrupt/DMA request if TX FIFO is completely empty
         a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
            compatibility).
         b) If cleared, DMA requests may follow each other very quickly,
            filling up the TX FIFO.
            Advantage: TX works even in case of high bus latency.
            Disadvantage: Edge-triggered DMA request circuitry may miss
                          a request. No more data is delivered, resulting
                          in a TX FIFO underrun.
         Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
         The PackeTwin doesn't. I don't know about the PI, but let's
         assume it behaves like the PI2.
    */
    if (priv->param.dma >= 0) {
      if (priv->type == TYPE_TWIN) write_scc(priv, R7, AUTOEOM | TXFIFOE);
      else write_scc(priv, R7, AUTOEOM);
    } else {
      write_scc(priv, R7, AUTOEOM | RXFIFOH);
    }
    write_scc(priv, R15, 0);
    break;
  }
  /* Preset CRC, NRZ(I) encoding */
  write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

  /* Configure baud rate generator */
  if (priv->param.brg_tc >= 0) {
    /* Program BR generator */
    write_scc(priv, R12, priv->param.brg_tc & 0xFF);
    write_scc(priv, R13, (priv->param.brg_tc>>8) & 0xFF);
    /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
       PackeTwin, not connected on the PI2); set DPLL source to BRG */
    write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
    /* Enable DPLL */
    write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
  } else {
    /* Disable BR generator */
    write_scc(priv, R14, DTRREQ | BRSRC);
  }

  /* Configure clocks */
  if (priv->type == TYPE_TWIN) {
    /* Disable external TX clock receiver */
    outb((info->twin_serial_cfg &=
          ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
         card_base + TWIN_SERIAL_CFG);
  }
  write_scc(priv, R11, priv->param.clocks);
  if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
    /* Enable external TX clock receiver */
    outb((info->twin_serial_cfg |=
          (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
         card_base + TWIN_SERIAL_CFG);
  }

  /* Configure PackeTwin */
  if (priv->type == TYPE_TWIN) {
    /* Assert DTR, enable interrupts */
    outb((info->twin_serial_cfg |= TWIN_EI |
          (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
         card_base + TWIN_SERIAL_CFG);
  }

  /* Read current status */
  priv->rr0 = read_scc(priv, R0);
  /* Enable DCD interrupt */
  write_scc(priv, R15, DCDIE);

  netif_start_queue(dev);

  return 0;
}
853
854
/* net_device stop: stop the queue, drop DTR on the PackeTwin, reset the
   SCC channel, and release the DMA channel and — once the sibling
   channel is closed too — the shared IRQ. Always returns 0. */
static int scc_close(struct net_device *dev) {
  struct scc_priv *priv = dev->priv;
  struct scc_info *info = priv->info;
  int card_base = priv->card_base;

  netif_stop_queue(dev);

  if (priv->type == TYPE_TWIN) {
    /* Drop DTR */
    outb((info->twin_serial_cfg &=
          (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
         card_base + TWIN_SERIAL_CFG);
  }

  /* Reset channel, free DMA and IRQ */
  write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
  if (priv->param.dma >= 0) {
    if (priv->type == TYPE_TWIN) outb(0, card_base + TWIN_DMA_CFG);
    free_dma(priv->param.dma);
  }
  if (--info->irq_used == 0) free_irq(dev->irq, info);

  MOD_DEC_USE_COUNT;
  return 0;
}
880
881
scc_ioctl(struct net_device * dev,struct ifreq * ifr,int cmd)882 static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) {
883 struct scc_priv *priv = dev->priv;
884
885 switch (cmd) {
886 case SIOCGSCCPARAM:
887 if (copy_to_user(ifr->ifr_data, &priv->param, sizeof(struct scc_param)))
888 return -EFAULT;
889 return 0;
890 case SIOCSSCCPARAM:
891 if (!capable(CAP_NET_ADMIN)) return -EPERM;
892 if (netif_running(dev)) return -EAGAIN;
893 if (copy_from_user(&priv->param, ifr->ifr_data, sizeof(struct scc_param)))
894 return -EFAULT;
895 return 0;
896 default:
897 return -EINVAL;
898 }
899 }
900
901
/* hard_start_xmit: copy one outgoing frame into the next TX ring buffer
   and kick the TX state machine if it is idle. The first skb byte is
   skipped — presumably the KISS command byte inserted by the AX.25
   stack (rx_bh prepends the matching null byte on receive). */
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) {
  struct scc_priv *priv = dev->priv;
  unsigned long flags;
  int i;

  /* Temporarily stop the scheduler feeding us packets */
  netif_stop_queue(dev);

  /* Transfer data to DMA buffer */
  i = priv->tx_head;
  memcpy(priv->tx_buf[i], skb->data+1, skb->len-1);
  priv->tx_len[i] = skb->len-1;

  /* Clear interrupts while we touch our circular buffers */
  save_flags(flags);
  cli();

  /* Move the ring buffer's head */
  priv->tx_head = (i + 1) % NUM_TX_BUF;
  priv->tx_count++;

  /* If we just filled up the last buffer, leave queue stopped.
     The higher layers must wait until we have a DMA buffer
     to accept the data. */
  if (priv->tx_count < NUM_TX_BUF) netif_wake_queue(dev);

  /* Set new TX state */
  if (priv->state == IDLE) {
    /* Assert RTS, start timer */
    priv->state = TX_HEAD;
    priv->tx_start = jiffies;
    write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
    write_scc(priv, R15, 0);
    start_timer(priv, priv->param.txdelay, 0);
  }

  /* Turn interrupts back on and free buffer */
  restore_flags(flags);
  dev_kfree_skb(skb);

  return 0;
}
944
945
scc_get_stats(struct net_device * dev)946 static struct net_device_stats *scc_get_stats(struct net_device *dev) {
947 struct scc_priv *priv = dev->priv;
948
949 return &priv->stats;
950 }
951
952
scc_set_mac_address(struct net_device * dev,void * sa)953 static int scc_set_mac_address(struct net_device *dev, void *sa) {
954 memcpy(dev->dev_addr, ((struct sockaddr *)sa)->sa_data, dev->addr_len);
955 return 0;
956 }
957
958
z8530_isr(struct scc_info * info)959 static inline void z8530_isr(struct scc_info *info) {
960 int is, i = 100;
961
962 while ((is = read_scc(&info->priv[0], R3)) && i--) {
963 if (is & CHARxIP) {
964 rx_isr(&info->priv[0]);
965 } else if (is & CHATxIP) {
966 tx_isr(&info->priv[0]);
967 } else if (is & CHAEXT) {
968 es_isr(&info->priv[0]);
969 } else if (is & CHBRxIP) {
970 rx_isr(&info->priv[1]);
971 } else if (is & CHBTxIP) {
972 tx_isr(&info->priv[1]);
973 } else {
974 es_isr(&info->priv[1]);
975 }
976 write_scc(&info->priv[0], R0, RES_H_IUS);
977 i++;
978 }
979 if (i < 0) {
980 printk("dmascc: stuck in ISR with RR3=0x%02x.\n", is);
981 }
982 /* Ok, no interrupts pending from this 8530. The INT line should
983 be inactive now. */
984 }
985
986
/* Top-level interrupt handler; both channels of a card share one IRQ. */
static void scc_isr(int irq, void *dev_id, struct pt_regs * regs) {
  struct scc_info *info = dev_id;

  /* At this point interrupts are enabled, and the interrupt under service
     is already acknowledged, but masked off.

     Interrupt processing: We loop until we know that the IRQ line is
     low. If another positive edge occurs afterwards during the ISR,
     another interrupt will be triggered by the interrupt controller
     as soon as the IRQ level is enabled again (see asm/irq.h).

     Bottom-half handlers will be processed after scc_isr(). This is
     important, since we only have small ringbuffers and want new data
     to be fetched/delivered immediately. */

  if (info->priv[0].type == TYPE_TWIN) {
    /* PackeTwin: its interrupt register also reports the two on-board
       timers; bits are active-low, hence the ~ before masking. */
    int is, card_base = info->priv[0].card_base;
    while ((is = ~inb(card_base + TWIN_INT_REG)) &
           TWIN_INT_MSK) {
      if (is & TWIN_SCC_MSK) {
        z8530_isr(info);
      } else if (is & TWIN_TMR1_MSK) {
        /* Reading the clear port acknowledges the timer interrupt */
        inb(card_base + TWIN_CLR_TMR1);
        tm_isr(&info->priv[0]);
      } else {
        inb(card_base + TWIN_CLR_TMR2);
        tm_isr(&info->priv[1]);
      }
    }
  } else z8530_isr(info);
}
1018
1019
/* RX interrupt. In DMA mode the data itself moves via DMA, so only the
   special condition (RR1) is handled here; in programmed-I/O mode every
   available character is drained from the RX FIFO into the current
   ring buffer. */
static void rx_isr(struct scc_priv *priv) {
  if (priv->param.dma >= 0) {
    /* Check special condition and perform error reset. See 2.4.7.5. */
    special_condition(priv, read_scc(priv, R1));
    write_scc(priv, R0, ERR_RES);
  } else {
    /* Check special condition for each character. Error reset not necessary.
       Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
    int rc;
    while (read_scc(priv, R0) & Rx_CH_AV) {
      rc = read_scc(priv, R1);
      if (priv->rx_ptr < BUF_SIZE)
        priv->rx_buf[priv->rx_head][priv->rx_ptr++] =
          read_scc_data(priv);
      else {
        /* Buffer full: frame too long; discard byte, flag length error */
        priv->rx_over = 2;
        read_scc_data(priv);
      }
      special_condition(priv, rc);
    }
  }
}
1042
1043
/* Handle an RX special condition; rc is the value read from RR1.
   Records overruns, and at end-of-frame validates the frame and queues
   good ones on the RX ring for delivery by rx_bh. */
static void special_condition(struct scc_priv *priv, int rc) {
  int cb;
  unsigned long flags;

  /* See Figure 2-15. Only overrun and EOF need to be checked. */

  if (rc & Rx_OVR) {
    /* Receiver overrun */
    priv->rx_over = 1;
    if (priv->param.dma < 0) write_scc(priv, R0, ERR_RES);
  } else if (rc & END_FR) {
    /* End of frame. Get byte count (the -2 strips what are presumably
       the two trailing frame-check/CRC bytes — TODO confirm) */
    if (priv->param.dma >= 0) {
      flags = claim_dma_lock();
      cb = BUF_SIZE - get_dma_residue(priv->param.dma) - 2;
      release_dma_lock(flags);
    } else {
      cb = priv->rx_ptr - 2;
    }
    if (priv->rx_over) {
      /* We had an overrun */
      priv->stats.rx_errors++;
      if (priv->rx_over == 2) priv->stats.rx_length_errors++;
      else priv->stats.rx_fifo_errors++;
      priv->rx_over = 0;
    } else if (rc & CRC_ERR) {
      /* Count invalid CRC only if packet length >= minimum */
      if (cb >= 15) {
        priv->stats.rx_errors++;
        priv->stats.rx_crc_errors++;
      }
    } else {
      if (cb >= 15) {
        if (priv->rx_count < NUM_RX_BUF - 1) {
          /* Put good frame in FIFO */
          priv->rx_len[priv->rx_head] = cb;
          priv->rx_head = (priv->rx_head + 1) % NUM_RX_BUF;
          priv->rx_count++;
          /* Mark bottom half handler */
          queue_task(&priv->rx_task, &tq_immediate);
          mark_bh(IMMEDIATE_BH);
        } else {
          /* RX ring full: drop the frame */
          priv->stats.rx_errors++;
          priv->stats.rx_over_errors++;
        }
      }
    }
    /* Get ready for new frame */
    if (priv->param.dma >= 0) {
      flags = claim_dma_lock();
      set_dma_addr(priv->param.dma, (int) priv->rx_buf[priv->rx_head]);
      set_dma_count(priv->param.dma, BUF_SIZE);
      release_dma_lock(flags);
    } else {
      priv->rx_ptr = 0;
    }
  }
}
1102
1103
rx_bh(void * arg)1104 static void rx_bh(void *arg) {
1105 struct scc_priv *priv = arg;
1106 int i = priv->rx_tail;
1107 int cb;
1108 unsigned long flags;
1109 struct sk_buff *skb;
1110 unsigned char *data;
1111
1112 save_flags(flags);
1113 cli();
1114
1115 while (priv->rx_count) {
1116 restore_flags(flags);
1117 cb = priv->rx_len[i];
1118 /* Allocate buffer */
1119 skb = dev_alloc_skb(cb+1);
1120 if (skb == NULL) {
1121 /* Drop packet */
1122 priv->stats.rx_dropped++;
1123 } else {
1124 /* Fill buffer */
1125 data = skb_put(skb, cb+1);
1126 data[0] = 0;
1127 memcpy(&data[1], priv->rx_buf[i], cb);
1128 skb->dev = priv->dev;
1129 skb->protocol = ntohs(ETH_P_AX25);
1130 skb->mac.raw = skb->data;
1131 netif_rx(skb);
1132 priv->stats.rx_packets++;
1133 priv->stats.rx_bytes += cb;
1134 }
1135 save_flags(flags);
1136 cli();
1137 /* Move tail */
1138 priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
1139 priv->rx_count--;
1140 }
1141
1142 restore_flags(flags);
1143 }
1144
1145
tx_isr(struct scc_priv * priv)1146 static void tx_isr(struct scc_priv *priv) {
1147 int i = priv->tx_tail, p = priv->tx_ptr;
1148
1149 /* Suspend TX interrupts if we don't want to send anything.
1150 See Figure 2-22. */
1151 if (p == priv->tx_len[i]) {
1152 write_scc(priv, R0, RES_Tx_P);
1153 return;
1154 }
1155
1156 /* Write characters */
1157 while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
1158 write_scc_data(priv, priv->tx_buf[i][p++], 0);
1159 }
1160
1161 /* Reset EOM latch of Z8530 */
1162 if (!priv->tx_ptr && p && priv->chip == Z8530)
1163 write_scc(priv, R0, RES_EOM_L);
1164
1165 priv->tx_ptr = p;
1166 }
1167
1168
tx_on(struct scc_priv * priv)1169 static inline void tx_on(struct scc_priv *priv) {
1170 int i, n;
1171 unsigned long flags;
1172
1173 if (priv->param.dma >= 0) {
1174 n = (priv->chip == Z85230) ? 3 : 1;
1175 /* Program DMA controller */
1176 flags = claim_dma_lock();
1177 set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
1178 set_dma_addr(priv->param.dma, (int) priv->tx_buf[priv->tx_tail]+n);
1179 set_dma_count(priv->param.dma, priv->tx_len[priv->tx_tail]-n);
1180 release_dma_lock(flags);
1181 /* Enable TX underrun interrupt */
1182 write_scc(priv, R15, TxUIE);
1183 /* Configure DREQ */
1184 if (priv->type == TYPE_TWIN)
1185 outb((priv->param.dma == 1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
1186 priv->card_base + TWIN_DMA_CFG);
1187 else
1188 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN | WT_RDY_ENAB);
1189 /* Write first byte(s) */
1190 save_flags(flags);
1191 cli();
1192 for (i = 0; i < n; i++)
1193 write_scc_data(priv, priv->tx_buf[priv->tx_tail][i], 1);
1194 enable_dma(priv->param.dma);
1195 restore_flags(flags);
1196 } else {
1197 write_scc(priv, R15, TxUIE);
1198 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
1199 tx_isr(priv);
1200 }
1201 /* Reset EOM latch if we do not have the AUTOEOM feature */
1202 if (priv->chip == Z8530) write_scc(priv, R0, RES_EOM_L);
1203 }
1204
1205
rx_on(struct scc_priv * priv)1206 static inline void rx_on(struct scc_priv *priv) {
1207 unsigned long flags;
1208
1209 /* Clear RX FIFO */
1210 while (read_scc(priv, R0) & Rx_CH_AV) read_scc_data(priv);
1211 priv->rx_over = 0;
1212 if (priv->param.dma >= 0) {
1213 /* Program DMA controller */
1214 flags = claim_dma_lock();
1215 set_dma_mode(priv->param.dma, DMA_MODE_READ);
1216 set_dma_addr(priv->param.dma, (int) priv->rx_buf[priv->rx_head]);
1217 set_dma_count(priv->param.dma, BUF_SIZE);
1218 release_dma_lock(flags);
1219 enable_dma(priv->param.dma);
1220 /* Configure PackeTwin DMA */
1221 if (priv->type == TYPE_TWIN) {
1222 outb((priv->param.dma == 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
1223 priv->card_base + TWIN_DMA_CFG);
1224 }
1225 /* Sp. cond. intr. only, ext int enable, RX DMA enable */
1226 write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
1227 WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
1228 } else {
1229 /* Reset current frame */
1230 priv->rx_ptr = 0;
1231 /* Intr. on all Rx characters and Sp. cond., ext int enable */
1232 write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
1233 WT_FN_RDYFN);
1234 }
1235 write_scc(priv, R0, ERR_RES);
1236 write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
1237 }
1238
1239
rx_off(struct scc_priv * priv)1240 static inline void rx_off(struct scc_priv *priv) {
1241 /* Disable receiver */
1242 write_scc(priv, R3, Rx8);
1243 /* Disable DREQ / RX interrupt */
1244 if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1245 outb(0, priv->card_base + TWIN_DMA_CFG);
1246 else
1247 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1248 /* Disable DMA */
1249 if (priv->param.dma >= 0) disable_dma(priv->param.dma);
1250 }
1251
1252
es_isr(struct scc_priv * priv)1253 static void es_isr(struct scc_priv *priv) {
1254 int i, rr0, drr0, res;
1255 unsigned long flags;
1256
1257 /* Read status, reset interrupt bit (open latches) */
1258 rr0 = read_scc(priv, R0);
1259 write_scc(priv, R0, RES_EXT_INT);
1260 drr0 = priv->rr0 ^ rr0;
1261 priv->rr0 = rr0;
1262
1263 /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
1264 it might have already been cleared again by AUTOEOM. */
1265 if (priv->state == TX_DATA) {
1266 /* Get remaining bytes */
1267 i = priv->tx_tail;
1268 if (priv->param.dma >= 0) {
1269 disable_dma(priv->param.dma);
1270 flags = claim_dma_lock();
1271 res = get_dma_residue(priv->param.dma);
1272 release_dma_lock(flags);
1273 } else {
1274 res = priv->tx_len[i] - priv->tx_ptr;
1275 priv->tx_ptr = 0;
1276 }
1277 /* Disable DREQ / TX interrupt */
1278 if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1279 outb(0, priv->card_base + TWIN_DMA_CFG);
1280 else
1281 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1282 if (res) {
1283 /* Update packet statistics */
1284 priv->stats.tx_errors++;
1285 priv->stats.tx_fifo_errors++;
1286 /* Other underrun interrupts may already be waiting */
1287 write_scc(priv, R0, RES_EXT_INT);
1288 write_scc(priv, R0, RES_EXT_INT);
1289 } else {
1290 /* Update packet statistics */
1291 priv->stats.tx_packets++;
1292 priv->stats.tx_bytes += priv->tx_len[i];
1293 /* Remove frame from FIFO */
1294 priv->tx_tail = (i + 1) % NUM_TX_BUF;
1295 priv->tx_count--;
1296 /* Inform upper layers */
1297 netif_wake_queue(priv->dev);
1298 }
1299 /* Switch state */
1300 write_scc(priv, R15, 0);
1301 if (priv->tx_count &&
1302 (jiffies - priv->tx_start) < priv->param.txtimeout) {
1303 priv->state = TX_PAUSE;
1304 start_timer(priv, priv->param.txpause, 0);
1305 } else {
1306 priv->state = TX_TAIL;
1307 start_timer(priv, priv->param.txtail, 0);
1308 }
1309 }
1310
1311 /* DCD transition */
1312 if (drr0 & DCD) {
1313 if (rr0 & DCD) {
1314 switch (priv->state) {
1315 case IDLE:
1316 case WAIT:
1317 priv->state = DCD_ON;
1318 write_scc(priv, R15, 0);
1319 start_timer(priv, priv->param.dcdon, 0);
1320 }
1321 } else {
1322 switch (priv->state) {
1323 case RX_ON:
1324 rx_off(priv);
1325 priv->state = DCD_OFF;
1326 write_scc(priv, R15, 0);
1327 start_timer(priv, priv->param.dcdoff, 0);
1328 }
1329 }
1330 }
1331
1332 /* CTS transition */
1333 if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
1334 tm_isr(priv);
1335
1336 }
1337
1338
tm_isr(struct scc_priv * priv)1339 static void tm_isr(struct scc_priv *priv) {
1340 switch (priv->state) {
1341 case TX_HEAD:
1342 case TX_PAUSE:
1343 tx_on(priv);
1344 priv->state = TX_DATA;
1345 break;
1346 case TX_TAIL:
1347 write_scc(priv, R5, TxCRC_ENAB | Tx8);
1348 priv->state = RTS_OFF;
1349 if (priv->type != TYPE_TWIN) write_scc(priv, R15, 0);
1350 start_timer(priv, priv->param.rtsoff, 0);
1351 break;
1352 case RTS_OFF:
1353 write_scc(priv, R15, DCDIE);
1354 priv->rr0 = read_scc(priv, R0);
1355 if (priv->rr0 & DCD) {
1356 priv->stats.collisions++;
1357 rx_on(priv);
1358 priv->state = RX_ON;
1359 } else {
1360 priv->state = WAIT;
1361 start_timer(priv, priv->param.waittime, DCDIE);
1362 }
1363 break;
1364 case WAIT:
1365 if (priv->tx_count) {
1366 priv->state = TX_HEAD;
1367 priv->tx_start = jiffies;
1368 write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
1369 write_scc(priv, R15, 0);
1370 start_timer(priv, priv->param.txdelay, 0);
1371 } else {
1372 priv->state = IDLE;
1373 if (priv->type != TYPE_TWIN) write_scc(priv, R15, DCDIE);
1374 }
1375 break;
1376 case DCD_ON:
1377 case DCD_OFF:
1378 write_scc(priv, R15, DCDIE);
1379 priv->rr0 = read_scc(priv, R0);
1380 if (priv->rr0 & DCD) {
1381 rx_on(priv);
1382 priv->state = RX_ON;
1383 } else {
1384 priv->state = WAIT;
1385 start_timer(priv,
1386 random()/priv->param.persist*priv->param.slottime,
1387 DCDIE);
1388 }
1389 break;
1390 }
1391 }
1392
1393
start_timer(struct scc_priv * priv,int t,int r15)1394 static void start_timer(struct scc_priv *priv, int t, int r15) {
1395 unsigned long flags;
1396
1397 outb(priv->tmr_mode, priv->tmr_ctrl);
1398 if (t == 0) {
1399 tm_isr(priv);
1400 } else if (t > 0) {
1401 save_flags(flags);
1402 cli();
1403 outb(t & 0xFF, priv->tmr_cnt);
1404 outb((t >> 8) & 0xFF, priv->tmr_cnt);
1405 if (priv->type != TYPE_TWIN) {
1406 write_scc(priv, R15, r15 | CTSIE);
1407 priv->rr0 |= CTS;
1408 }
1409 restore_flags(flags);
1410 }
1411 }
1412
1413