1 /* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
2 /* PLIP: A parallel port "network" driver for Linux. */
3 /* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
4 /*
5 * Authors: Donald Becker <becker@scyld.com>
6 * Tommy Thorn <thorn@daimi.aau.dk>
7 * Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
8 * Alan Cox <gw4pts@gw4pts.ampr.org>
9 * Peter Bauer <100136.3530@compuserve.com>
10 * Niibe Yutaka <gniibe@mri.co.jp>
11 * Nimrod Zimerman <zimerman@mailandnews.com>
12 *
13 * Enhancements:
14 * Modularization and ifreq/ifmap support by Alan Cox.
15 * Rewritten by Niibe Yutaka.
16 * parport-sharing awareness code by Philip Blundell.
17 * SMP locking by Niibe Yutaka.
18 * Support for parallel ports with no IRQ (poll mode),
19 * Modifications to use the parallel port API
20 * by Nimrod Zimerman.
21 *
22 * Fixes:
23 * Niibe Yutaka
24 * - Module initialization.
25 * - MTU fix.
26 * - Make sure other end is OK, before sending a packet.
27 * - Fix immediate timer problem.
28 *
29 * Al Viro
30 * - Changed {enable,disable}_irq handling to make it work
31 * with new ("stack") semantics.
32 *
33 * This program is free software; you can redistribute it and/or
34 * modify it under the terms of the GNU General Public License
35 * as published by the Free Software Foundation; either version
36 * 2 of the License, or (at your option) any later version.
37 */
38
39 /*
40 * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
41 * inspired by Russ Nelson's parallel port packet driver.
42 *
43 * NOTE:
44 * Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
45 * Because of the necessity to communicate to DOS machines with the
46 * Crynwr packet driver, Peter Bauer changed the protocol again
47 * back to original protocol.
48 *
49 * This version follows original PLIP protocol.
 * So, this PLIP can't communicate with the PLIP of Linux v1.0.
51 */
52
53 /*
54 * To use with DOS box, please do (Turn on ARP switch):
55 * # ifconfig plip[0-2] arp
56 */
/* Driver banner, printed via printk() for each device in plip_init_dev(). */
static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
58
59 /*
60 Sources:
61 Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
62 "parallel.asm" parallel port packet driver.
63
64 The "Crynwr" parallel port standard specifies the following protocol:
65 Trigger by sending nibble '0x8' (this causes interrupt on other end)
66 count-low octet
67 count-high octet
68 ... data octets
69 checksum octet
70 Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
71 <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
72
73 The packet is encapsulated as if it were ethernet.
74
75 The cable used is a de facto standard parallel null cable -- sold as
76 a "LapLink" cable by various places. You'll need a 12-conductor cable to
77 make one yourself. The wiring is:
78 SLCTIN 17 - 17
79 GROUND 25 - 25
80 D0->ERROR 2 - 15 15 - 2
81 D1->SLCT 3 - 13 13 - 3
82 D2->PAPOUT 4 - 12 12 - 4
83 D3->ACK 5 - 10 10 - 5
84 D4->BUSY 6 - 11 11 - 6
85 Do not connect the other pins. They are
86 D5,D6,D7 are 7,8,9
87 STROBE is 1, FEED is 14, INIT is 16
88 extra grounds are 18,19,20,21,22,23,24
89 */
90
91 #include <linux/module.h>
92 #include <linux/kernel.h>
93 #include <linux/sched.h>
94 #include <linux/types.h>
95 #include <linux/fcntl.h>
96 #include <linux/interrupt.h>
97 #include <linux/string.h>
98 #include <linux/ptrace.h>
99 #include <linux/if_ether.h>
100 #include <asm/system.h>
101 #include <linux/in.h>
102 #include <linux/errno.h>
103 #include <linux/delay.h>
104 #include <linux/lp.h>
105 #include <linux/init.h>
106
107 #include <linux/netdevice.h>
108 #include <linux/etherdevice.h>
109 #include <linux/inetdevice.h>
110 #include <linux/skbuff.h>
111 #include <linux/if_plip.h>
112 #include <net/neighbour.h>
113
114 #include <linux/tqueue.h>
115 #include <linux/ioport.h>
116 #include <linux/spinlock.h>
117 #include <asm/bitops.h>
118 #include <asm/irq.h>
119 #include <asm/byteorder.h>
120 #include <asm/semaphore.h>
121
122 #include <linux/parport.h>
123
/* Maximum number of devices to support. */
#define PLIP_MAX 8

/* Use 0 for production, 1 for verification, >2 for debug */
#ifndef NET_DEBUG
#define NET_DEBUG 1
#endif
static unsigned int net_debug = NET_DEBUG;

/* irq == -1 means the port runs IRQ-less (polled mode), so there is no
   interrupt line to mask or unmask. */
#define ENABLE(irq) if (irq != -1) enable_irq(irq)
#define DISABLE(irq) if (irq != -1) disable_irq(irq)

/* In micro second */
#define PLIP_DELAY_UNIT 1

/* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_TRIGGER_WAIT 500

/* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_NIBBLE_WAIT 3000
144
145 /* Bottom halves */
146 static void plip_kick_bh(struct net_device *dev);
147 static void plip_bh(struct net_device *dev);
148 static void plip_timer_bh(struct net_device *dev);
149
150 /* Interrupt handler */
151 static void plip_interrupt(int irq, void *dev_id, struct pt_regs *regs);
152
153 /* Functions for DEV methods */
154 static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
155 static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
156 unsigned short type, void *daddr,
157 void *saddr, unsigned len);
158 static int plip_hard_header_cache(struct neighbour *neigh,
159 struct hh_cache *hh);
160 static int plip_open(struct net_device *dev);
161 static int plip_close(struct net_device *dev);
162 static struct net_device_stats *plip_get_stats(struct net_device *dev);
163 static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
164 static int plip_preempt(void *handle);
165 static void plip_wakeup(void *handle);
166
/* Overall link state, kept in net_local.connection and used as the index
   into connection_state_table by plip_bh(). */
enum plip_connection_state {
	PLIP_CN_NONE=0,		/* idle */
	PLIP_CN_RECEIVE,	/* a receive is in progress */
	PLIP_CN_SEND,		/* a transmit is in progress */
	PLIP_CN_CLOSING,	/* transfer finished, waiting to go idle */
	PLIP_CN_ERROR		/* fault; wait for the other end to settle */
};
174
/* Progress cursor within one packet transfer; the send/receive state
   machines resume at this state when re-entered after a timeout. */
enum plip_packet_state {
	PLIP_PK_DONE=0,		/* no transfer in progress / finished */
	PLIP_PK_TRIGGER,	/* handshaking with the peer */
	PLIP_PK_LENGTH_LSB,	/* transferring the low length byte */
	PLIP_PK_LENGTH_MSB,	/* transferring the high length byte */
	PLIP_PK_DATA,		/* transferring payload bytes */
	PLIP_PK_CHECKSUM	/* transferring the 8-bit checksum */
};
183
/* Progress cursor within one byte; each byte crosses the cable as two
   4-bit nibbles (see plip_receive()/plip_send()). */
enum plip_nibble_state {
	PLIP_NB_BEGIN,	/* ready for the low nibble */
	PLIP_NB_1,	/* low nibble done, high nibble next */
	PLIP_NB_2,	/* send side only: finishing the high-nibble handshake */
};
189
/* Per-direction (one for send, one for receive) state of the PLIP
   byte/packet engine. */
struct plip_local {
	enum plip_packet_state state;	/* where we are within the packet */
	enum plip_nibble_state nibble;	/* where we are within the current byte */
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;			/* packet length, per-octet or whole */
	unsigned short byte;		/* index of the byte being transferred */
	unsigned char checksum;		/* running 8-bit sum over the data */
	unsigned char data;		/* scratch byte (e.g. received checksum) */
	struct sk_buff *skb;		/* the packet being sent/received */
};
212
/* Per-device driver state, hung off dev->priv. */
struct net_local {
	struct net_device_stats enet_stats;	/* interface statistics */
	struct tq_struct immediate;	/* runs plip_bh via tq_immediate */
	struct tq_struct deferred;	/* runs plip_kick_bh via tq_timer */
	struct tq_struct timer;		/* runs plip_timer_bh (IRQ-less poll mode) */
	struct plip_local snd_data;	/* transmit-side state machine */
	struct plip_local rcv_data;	/* receive-side state machine */
	struct pardevice *pardev;	/* our parport registration */
	unsigned long trigger;		/* trigger timeout (PLIP_TRIGGER_WAIT) */
	unsigned long nibble;		/* nibble timeout (PLIP_NIBBLE_WAIT) */
	enum plip_connection_state connection;	/* current link state */
	unsigned short timeout_count;	/* consecutive timeouts before giving up */
	int is_deferred;		/* a retry is pending on tq_timer */
	int port_owner;			/* nonzero while we hold the parport claim */
	int should_relinquish;		/* release the port once the connection closes */
	/* Saved Ethernet header builders, wrapped by plip_hard_header and
	   plip_hard_header_cache so addresses can be rewritten. */
	int (*orig_hard_header)(struct sk_buff *skb, struct net_device *dev,
	                        unsigned short type, void *daddr,
	                        void *saddr, unsigned len);
	int (*orig_hard_header_cache)(struct neighbour *neigh,
	                              struct hh_cache *hh);
	spinlock_t lock;		/* protects connection/state transitions */
	atomic_t kill_timer;		/* request the poll timer to stop */
	struct semaphore killed_timer_sem;	/* upped when the poll timer has stopped */
};
237
enable_parport_interrupts(struct net_device * dev)238 inline static void enable_parport_interrupts (struct net_device *dev)
239 {
240 if (dev->irq != -1)
241 {
242 struct parport *port =
243 ((struct net_local *)dev->priv)->pardev->port;
244 port->ops->enable_irq (port);
245 }
246 }
247
disable_parport_interrupts(struct net_device * dev)248 inline static void disable_parport_interrupts (struct net_device *dev)
249 {
250 if (dev->irq != -1)
251 {
252 struct parport *port =
253 ((struct net_local *)dev->priv)->pardev->port;
254 port->ops->disable_irq (port);
255 }
256 }
257
write_data(struct net_device * dev,unsigned char data)258 inline static void write_data (struct net_device *dev, unsigned char data)
259 {
260 struct parport *port =
261 ((struct net_local *)dev->priv)->pardev->port;
262
263 port->ops->write_data (port, data);
264 }
265
read_status(struct net_device * dev)266 inline static unsigned char read_status (struct net_device *dev)
267 {
268 struct parport *port =
269 ((struct net_local *)dev->priv)->pardev->port;
270
271 return port->ops->read_status (port);
272 }
273
274 /* Entry point of PLIP driver.
275 Probe the hardware, and register/initialize the driver.
276
277 PLIP is rather weird, because of the way it interacts with the parport
278 system. It is _not_ initialised from Space.c. Instead, plip_init()
279 is called, and that function makes up a "struct net_device" for each port, and
280 then calls us here.
281
282 */
283 int __init
plip_init_dev(struct net_device * dev,struct parport * pb)284 plip_init_dev(struct net_device *dev, struct parport *pb)
285 {
286 struct net_local *nl;
287 struct pardevice *pardev;
288
289 SET_MODULE_OWNER(dev);
290 dev->irq = pb->irq;
291 dev->base_addr = pb->base;
292
293 if (pb->irq == -1) {
294 printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
295 "which is fairly inefficient!\n", pb->name);
296 }
297
298 pardev = parport_register_device(pb, dev->name, plip_preempt,
299 plip_wakeup, plip_interrupt,
300 0, dev);
301
302 if (!pardev)
303 return -ENODEV;
304
305 printk(KERN_INFO "%s", version);
306 if (dev->irq != -1)
307 printk(KERN_INFO "%s: Parallel port at %#3lx, using IRQ %d.\n",
308 dev->name, dev->base_addr, dev->irq);
309 else
310 printk(KERN_INFO "%s: Parallel port at %#3lx, not using IRQ.\n",
311 dev->name, dev->base_addr);
312
313 /* Fill in the generic fields of the device structure. */
314 ether_setup(dev);
315
316 /* Then, override parts of it */
317 dev->hard_start_xmit = plip_tx_packet;
318 dev->open = plip_open;
319 dev->stop = plip_close;
320 dev->get_stats = plip_get_stats;
321 dev->do_ioctl = plip_ioctl;
322 dev->header_cache_update = NULL;
323 dev->tx_queue_len = 10;
324 dev->flags = IFF_POINTOPOINT|IFF_NOARP;
325 memset(dev->dev_addr, 0xfc, ETH_ALEN);
326
327 /* Set the private structure */
328 dev->priv = kmalloc(sizeof (struct net_local), GFP_KERNEL);
329 if (dev->priv == NULL) {
330 printk(KERN_ERR "%s: out of memory\n", dev->name);
331 parport_unregister_device(pardev);
332 return -ENOMEM;
333 }
334 memset(dev->priv, 0, sizeof(struct net_local));
335 nl = (struct net_local *) dev->priv;
336
337 nl->orig_hard_header = dev->hard_header;
338 dev->hard_header = plip_hard_header;
339
340 nl->orig_hard_header_cache = dev->hard_header_cache;
341 dev->hard_header_cache = plip_hard_header_cache;
342
343 nl->pardev = pardev;
344
345 nl->port_owner = 0;
346
347 /* Initialize constants */
348 nl->trigger = PLIP_TRIGGER_WAIT;
349 nl->nibble = PLIP_NIBBLE_WAIT;
350
351 /* Initialize task queue structures */
352 INIT_LIST_HEAD(&nl->immediate.list);
353 nl->immediate.sync = 0;
354 nl->immediate.routine = (void (*)(void *))plip_bh;
355 nl->immediate.data = dev;
356
357 INIT_LIST_HEAD(&nl->deferred.list);
358 nl->deferred.sync = 0;
359 nl->deferred.routine = (void (*)(void *))plip_kick_bh;
360 nl->deferred.data = dev;
361
362 if (dev->irq == -1) {
363 INIT_LIST_HEAD(&nl->timer.list);
364 nl->timer.sync = 0;
365 nl->timer.routine = (void (*)(void *))plip_timer_bh;
366 nl->timer.data = dev;
367 }
368
369 spin_lock_init(&nl->lock);
370
371 return 0;
372 }
373
374 /* Bottom half handler for the delayed request.
375 This routine is kicked by do_timer().
376 Request `plip_bh' to be invoked. */
377 static void
plip_kick_bh(struct net_device * dev)378 plip_kick_bh(struct net_device *dev)
379 {
380 struct net_local *nl = (struct net_local *)dev->priv;
381
382 if (nl->is_deferred) {
383 queue_task(&nl->immediate, &tq_immediate);
384 mark_bh(IMMEDIATE_BH);
385 }
386 }
387
388 /* Forward declarations of internal routines */
389 static int plip_none(struct net_device *, struct net_local *,
390 struct plip_local *, struct plip_local *);
391 static int plip_receive_packet(struct net_device *, struct net_local *,
392 struct plip_local *, struct plip_local *);
393 static int plip_send_packet(struct net_device *, struct net_local *,
394 struct plip_local *, struct plip_local *);
395 static int plip_connection_close(struct net_device *, struct net_local *,
396 struct plip_local *, struct plip_local *);
397 static int plip_error(struct net_device *, struct net_local *,
398 struct plip_local *, struct plip_local *);
399 static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
400 struct plip_local *snd,
401 struct plip_local *rcv,
402 int error);
403
/* Return codes of the per-state handler functions below. */
#define OK        0
#define TIMEOUT   1
#define ERROR     2
#define HS_TIMEOUT 3

/* Signature of a per-connection-state bottom-half handler. */
typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
			 struct plip_local *snd, struct plip_local *rcv);

/* Indexed by enum plip_connection_state; dispatched from plip_bh(). */
static plip_func connection_state_table[] =
{
	plip_none,
	plip_receive_packet,
	plip_send_packet,
	plip_connection_close,
	plip_error
};
420
421 /* Bottom half handler of PLIP. */
422 static void
plip_bh(struct net_device * dev)423 plip_bh(struct net_device *dev)
424 {
425 struct net_local *nl = (struct net_local *)dev->priv;
426 struct plip_local *snd = &nl->snd_data;
427 struct plip_local *rcv = &nl->rcv_data;
428 plip_func f;
429 int r;
430
431 nl->is_deferred = 0;
432 f = connection_state_table[nl->connection];
433 if ((r = (*f)(dev, nl, snd, rcv)) != OK
434 && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
435 nl->is_deferred = 1;
436 queue_task(&nl->deferred, &tq_timer);
437 }
438 }
439
440 static void
plip_timer_bh(struct net_device * dev)441 plip_timer_bh(struct net_device *dev)
442 {
443 struct net_local *nl = (struct net_local *)dev->priv;
444
445 if (!(atomic_read (&nl->kill_timer))) {
446 plip_interrupt (-1, dev, NULL);
447
448 queue_task (&nl->timer, &tq_timer);
449 }
450 else {
451 up (&nl->killed_timer_sem);
452 }
453 }
454
/* Handle a non-OK result (TIMEOUT/ERROR/HS_TIMEOUT) from a state handler:
   allow a few retries, then drop both pending skbs and put the link into
   PLIP_CN_ERROR. Returns TIMEOUT (caller defers a retry) or OK. */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;
	/*
	 * This is tricky. If we got here from the beginning of send (either
	 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
	 * already disabled. With the old variant of {enable,disable}_irq()
	 * extra disable_irq() was a no-op. Now it became mortal - it's
	 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
	 * that is). So we have to treat HS_TIMEOUT and ERROR from send
	 * in a special way.
	 */

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			/* Handshake timeouts get 10 tries, others 3. */
			if ((error == HS_TIMEOUT
			     && nl->timeout_count <= 10)
			    || nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			/* ERROR from send: reclassify so the IRQ gets
			   disabled below (see comment above). */
			error = HS_TIMEOUT;
		nl->enet_stats.tx_errors++;
		nl->enet_stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		nl->enet_stats.rx_dropped++;
	}
	/* Give up: drop both directions' state and pending packets. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		/* Only in this case is the IRQ still enabled (see above). */
		DISABLE(dev->irq);
		synchronize_irq();
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}
530
531 static int
plip_none(struct net_device * dev,struct net_local * nl,struct plip_local * snd,struct plip_local * rcv)532 plip_none(struct net_device *dev, struct net_local *nl,
533 struct plip_local *snd, struct plip_local *rcv)
534 {
535 return OK;
536 }
537
538 /* PLIP_RECEIVE --- receive a byte(two nibbles)
539 Returns OK on success, TIMEOUT on timeout */
/* Receive one byte as two nibbles, resuming at *ns_p if a previous call
   timed out mid-byte. The case fallthroughs are intentional. */
inline static int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		/* Wait for the low nibble: status bit 0x80 low and the
		   value stable across two consecutive reads (debounce). */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p = (c0 >> 3) & 0x0f;
		write_data (dev, 0x10); /* send ACK */
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Wait for the high nibble: status bit 0x80 raised again. */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;
		write_data (dev, 0x00); /* send ACK */
		*ns_p = PLIP_NB_BEGIN;
		/* fall through */
	case PLIP_NB_2:
		break;
	}
	return OK;
}
586
587 /*
588 * Determine the packet's protocol ID. The rule here is that we
589 * assume 802.3 if the type field is short enough to be a length.
590 * This is normal practice and works for any 'now in use' protocol.
591 *
 * PLIP is ethernet-ish but the daddr might not be valid if unicast.
 * PLIP fortunately has no bus architecture (it's point-to-point).
594 *
595 * We can't fix the daddr thing as that quirk (more bug) is embedded
596 * in far too many old systems not all even running Linux.
597 */
598
/* Classify a received frame (broadcast/multicast) and return its protocol
   id, treating the frame as Ethernet; see the rationale comment above. */
static unsigned short plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	/* Record the link-level header, then advance past it. */
	skb->mac.raw=skb->data;
	skb_pull(skb,dev->hard_header_len);
	eth= skb->mac.ethernet;

	/* Group bit set in the destination => broadcast or multicast. */
	if(*eth->h_dest&1)
	{
		if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 *	This ALLMULTI check should be redundant by 1.4
	 *	so don't forget to remove it.
	 */

	/* Values >= 1536 are Ethernet II protocol types; smaller values
	   are 802.3 lengths (see the comment block above). */
	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 *	This is a magic hack to spot IPX packets. Older Novell breaks
	 *	the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 *	layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 *	won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/*
	 *	Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}
640
641
/* PLIP_RECEIVE_PACKET --- receive a packet.
   The switch falls through the states so an interrupted transfer resumes
   exactly where it left off (rcv->state is the cursor). */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01); /* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* We also have a transmit pending: use the shorter
			   trigger timeout so a collision resolves quickly
			   back to the send side. */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision, here dev->tbusy == 1 */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				queue_task(&nl->deferred, &tq_timer);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		/* Sanity-check the announced length against the MTU. */
		if (rcv->length.h > dev->mtu + dev->hard_header_len
		    || rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		while (++rcv->byte < rcv->length.h);
		/* Accumulate the 8-bit checksum over the received data. */
		do
			rcv->checksum += lbuf[--rcv->byte];
		while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			nl->enet_stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Inform the upper layer for the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx(rcv->skb);
		dev->last_rx = jiffies;
		nl->enet_stats.rx_bytes += rcv->length.h;
		nl->enet_stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			/* A transmit is pending: go straight to sending. */
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			queue_task(&nl->immediate, &tq_immediate);
			mark_bh(IMMEDIATE_BH);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
759
/* PLIP_SEND --- send a byte (two nibbles)
   Returns OK on success, TIMEOUT when timeout.
   Resumes at *ns_p after a timeout; the case fallthroughs are intentional. */
inline static int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		/* Present the low nibble on the data lines. */
		write_data (dev, data & 0x0f);
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Raise the strobe bit (0x10), then wait for the peer to
		   acknowledge by dropping its status bit 0x80. */
		write_data (dev, 0x10 | (data & 0x0f));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		/* Present the high nibble, strobe still raised. */
		write_data (dev, 0x10 | (data >> 4));
		*ns_p = PLIP_NB_2;
		/* fall through */

	case PLIP_NB_2:
		/* Drop the strobe, then wait for the peer's ack (status
		   bit 0x80 raised again). */
		write_data (dev, (data >> 4));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
804
/* PLIP_SEND_PACKET --- send a packet.
   Falls through the states so a resumed send continues at snd->state. */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		/* Peer must look idle (status lines == 0x80) first. */
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger remote rx interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted. */
				nl->enet_stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {
				/* Peer acknowledged the trigger. */
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq();
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Interrupted.
					   We don't need to enable irq,
					   as it is soon disabled. */
					/* Yes, we do. New variant of
					   {enable,disable}_irq *counts*
					   them.  -- AV  */
					ENABLE(dev->irq);
					nl->enet_stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		do
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		while (++snd->byte < snd->length.h);
		/* Compute the 8-bit checksum the receiver will verify. */
		do
			snd->checksum += lbuf[--snd->byte];
		while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		nl->enet_stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		nl->enet_stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Close the connection */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		queue_task(&nl->deferred, &tq_timer);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}
920
921 static int
plip_connection_close(struct net_device * dev,struct net_local * nl,struct plip_local * snd,struct plip_local * rcv)922 plip_connection_close(struct net_device *dev, struct net_local *nl,
923 struct plip_local *snd, struct plip_local *rcv)
924 {
925 spin_lock_irq(&nl->lock);
926 if (nl->connection == PLIP_CN_CLOSING) {
927 nl->connection = PLIP_CN_NONE;
928 netif_wake_queue (dev);
929 }
930 spin_unlock_irq(&nl->lock);
931 if (nl->should_relinquish) {
932 nl->should_relinquish = nl->port_owner = 0;
933 parport_release(nl->pardev);
934 }
935 return OK;
936 }
937
938 /* PLIP_ERROR --- wait till other end settled */
939 static int
plip_error(struct net_device * dev,struct net_local * nl,struct plip_local * snd,struct plip_local * rcv)940 plip_error(struct net_device *dev, struct net_local *nl,
941 struct plip_local *snd, struct plip_local *rcv)
942 {
943 unsigned char status;
944
945 status = read_status(dev);
946 if ((status & 0xf8) == 0x80) {
947 if (net_debug > 2)
948 printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
949 nl->connection = PLIP_CN_NONE;
950 nl->should_relinquish = 0;
951 netif_start_queue (dev);
952 enable_parport_interrupts (dev);
953 ENABLE(dev->irq);
954 netif_wake_queue (dev);
955 } else {
956 nl->is_deferred = 1;
957 queue_task(&nl->deferred, &tq_timer);
958 }
959
960 return OK;
961 }
962
/* Handle the parallel port interrupts.
   Also called with irq == -1 from plip_timer_bh() in polled mode. */
static void
plip_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;

	if (dev == NULL) {
		printk(KERN_DEBUG "plip_interrupt: irq %d for unknown device.\n", irq);
		return;
	}

	nl = (struct net_local *)dev->priv;
	rcv = &nl->rcv_data;

	spin_lock_irq (&nl->lock);

	/* A genuine trigger from the peer leaves the status lines at 0xc0;
	   anything else is treated as spurious (silently in polled mode). */
	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irq (&nl->lock);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		/* fall through: start receiving right away */
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		queue_task(&nl->immediate, &tq_immediate);
		mark_bh(IMMEDIATE_BH);
		break;

	case PLIP_CN_RECEIVE:
		/* May occur because there is race condition
		   around test and set of dev->interrupt.
		   Ignore this interrupt. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irq(&nl->lock);
}
1018
1019 static int
plip_tx_packet(struct sk_buff * skb,struct net_device * dev)1020 plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
1021 {
1022 struct net_local *nl = (struct net_local *)dev->priv;
1023 struct plip_local *snd = &nl->snd_data;
1024
1025 if (netif_queue_stopped(dev))
1026 return 1;
1027
1028 /* We may need to grab the bus */
1029 if (!nl->port_owner) {
1030 if (parport_claim(nl->pardev))
1031 return 1;
1032 nl->port_owner = 1;
1033 }
1034
1035 netif_stop_queue (dev);
1036
1037 if (skb->len > dev->mtu + dev->hard_header_len) {
1038 printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
1039 netif_start_queue (dev);
1040 return 1;
1041 }
1042
1043 if (net_debug > 2)
1044 printk(KERN_DEBUG "%s: send request\n", dev->name);
1045
1046 spin_lock_irq(&nl->lock);
1047 dev->trans_start = jiffies;
1048 snd->skb = skb;
1049 snd->length.h = skb->len;
1050 snd->state = PLIP_PK_TRIGGER;
1051 if (nl->connection == PLIP_CN_NONE) {
1052 nl->connection = PLIP_CN_SEND;
1053 nl->timeout_count = 0;
1054 }
1055 queue_task(&nl->immediate, &tq_immediate);
1056 mark_bh(IMMEDIATE_BH);
1057 spin_unlock_irq(&nl->lock);
1058
1059 return 0;
1060 }
1061
1062 static void
plip_rewrite_address(struct net_device * dev,struct ethhdr * eth)1063 plip_rewrite_address(struct net_device *dev, struct ethhdr *eth)
1064 {
1065 struct in_device *in_dev;
1066
1067 if ((in_dev=dev->ip_ptr) != NULL) {
1068 /* Any address will do - we take the first */
1069 struct in_ifaddr *ifa=in_dev->ifa_list;
1070 if (ifa != NULL) {
1071 memcpy(eth->h_source, dev->dev_addr, 6);
1072 memset(eth->h_dest, 0xfc, 2);
1073 memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
1074 }
1075 }
1076 }
1077
1078 static int
plip_hard_header(struct sk_buff * skb,struct net_device * dev,unsigned short type,void * daddr,void * saddr,unsigned len)1079 plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1080 unsigned short type, void *daddr,
1081 void *saddr, unsigned len)
1082 {
1083 struct net_local *nl = (struct net_local *)dev->priv;
1084 int ret;
1085
1086 if ((ret = nl->orig_hard_header(skb, dev, type, daddr, saddr, len)) >= 0)
1087 plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1088
1089 return ret;
1090 }
1091
plip_hard_header_cache(struct neighbour * neigh,struct hh_cache * hh)1092 int plip_hard_header_cache(struct neighbour *neigh,
1093 struct hh_cache *hh)
1094 {
1095 struct net_local *nl = (struct net_local *)neigh->dev->priv;
1096 int ret;
1097
1098 if ((ret = nl->orig_hard_header_cache(neigh, hh)) == 0)
1099 {
1100 struct ethhdr *eth;
1101
1102 eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1103 HH_DATA_OFF(sizeof(*eth)));
1104 plip_rewrite_address (neigh->dev, eth);
1105 }
1106
1107 return ret;
1108 }
1109
/* Open/initialize the board.  This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.

   This routine claims exclusive access to the parallel port through
   the parport layer, and starts the poll timer when the port has no
   IRQ line.
*/
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = (struct net_local *)dev->priv;
	struct in_device *in_dev;

	/* Grab the port */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		/* No IRQ line: fall back to polling from a recurring
		   timer task (stopped again in plip_close). */
		atomic_set (&nl->kill_timer, 0);
		queue_task (&nl->timer, &tq_timer);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* Fill in the MAC-level header.
	   We used to abuse dev->broadcast to store the point-to-point
	   MAC address, but we no longer do it. Instead, we fetch the
	   interface address whenever it is needed, which is cheap enough
	   because we use the hh_cache. Actually, abusing dev->broadcast
	   didn't work, because when using plip_open the point-to-point
	   address isn't yet known.
	   PLIP doesn't have a real MAC address, but we need it to be
	   DOS compatible, and to properly support taps (otherwise,
	   when the device address isn't identical to the address of a
	   received frame, the kernel incorrectly drops it). */

	if ((in_dev=dev->ip_ptr) != NULL) {
		/* Any address will do - we take the first. We already
		   have the first two bytes filled with 0xfc, from
		   plip_init_dev(). */
		struct in_ifaddr *ifa=in_dev->ifa_list;
		if (ifa != NULL) {
			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}
1173
/* The inverse routine to plip_open (). */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = (struct net_local *)dev->priv;
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	disable_parport_interrupts (dev);
	synchronize_irq();

	if (dev->irq == -1)
	{
		/* Polling mode: flag the timer task to stop and wait on
		   the semaphore until it acknowledges (it ups the
		   semaphore when it sees kill_timer set). */
		init_MUTEX_LOCKED (&nl->killed_timer_sem);
		atomic_set (&nl->kill_timer, 1);
		down (&nl->killed_timer_sem);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	/* Reset both halves of the state machine and drop any packet
	   still in flight. */
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	/* Reset. */
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
1220
1221 static int
plip_preempt(void * handle)1222 plip_preempt(void *handle)
1223 {
1224 struct net_device *dev = (struct net_device *)handle;
1225 struct net_local *nl = (struct net_local *)dev->priv;
1226
1227 /* Stand our ground if a datagram is on the wire */
1228 if (nl->connection != PLIP_CN_NONE) {
1229 nl->should_relinquish = 1;
1230 return 1;
1231 }
1232
1233 nl->port_owner = 0; /* Remember that we released the bus */
1234 return 0;
1235 }
1236
1237 static void
plip_wakeup(void * handle)1238 plip_wakeup(void *handle)
1239 {
1240 struct net_device *dev = (struct net_device *)handle;
1241 struct net_local *nl = (struct net_local *)dev->priv;
1242
1243 if (nl->port_owner) {
1244 /* Why are we being woken up? */
1245 printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1246 if (!parport_claim(nl->pardev))
1247 /* bus_owner is already set (but why?) */
1248 printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1249 else
1250 return;
1251 }
1252
1253 if (!(dev->flags & IFF_UP))
1254 /* Don't need the port when the interface is down */
1255 return;
1256
1257 if (!parport_claim(nl->pardev)) {
1258 nl->port_owner = 1;
1259 /* Clear the data port. */
1260 write_data (dev, 0x00);
1261 }
1262
1263 return;
1264 }
1265
1266 static struct net_device_stats *
plip_get_stats(struct net_device * dev)1267 plip_get_stats(struct net_device *dev)
1268 {
1269 struct net_local *nl = (struct net_local *)dev->priv;
1270 struct net_device_stats *r = &nl->enet_stats;
1271
1272 return r;
1273 }
1274
1275 static int
plip_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)1276 plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1277 {
1278 struct net_local *nl = (struct net_local *) dev->priv;
1279 struct plipconf *pc = (struct plipconf *) &rq->ifr_data;
1280
1281 switch(pc->pcmd) {
1282 case PLIP_GET_TIMEOUT:
1283 pc->trigger = nl->trigger;
1284 pc->nibble = nl->nibble;
1285 break;
1286 case PLIP_SET_TIMEOUT:
1287 if(!capable(CAP_NET_ADMIN))
1288 return -EPERM;
1289 nl->trigger = pc->trigger;
1290 nl->nibble = pc->nibble;
1291 break;
1292 default:
1293 return -EOPNOTSUPP;
1294 }
1295 return 0;
1296 }
1297
/* Module parameters: 'parport' lists the parport numbers to attach to
   (-1 entries mean "unset"; parport[0] == -2 disables the driver);
   'timid' makes plip skip ports that already have other devices. */
static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
static int timid;

MODULE_PARM(parport, "1-" __MODULE_STRING(PLIP_MAX) "i");
MODULE_PARM(timid, "1i");
MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");

/* One net_device per attached port, indexed by plip unit number. */
static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1306
1307 static int inline
plip_searchfor(int list[],int a)1308 plip_searchfor(int list[], int a)
1309 {
1310 int i;
1311 for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1312 if (list[i] == a) return 1;
1313 }
1314 return 0;
1315 }
1316
/* plip_attach() is called (by the parport code) when a port is
 * available to use. */
static void plip_attach (struct parport *port)
{
	/* Next free plip unit number; persists across calls. */
	static int i;

	/* Attach when no explicit port list was given (unless 'timid'
	   and the port already has devices), or when this port number
	   is in the user-supplied list. */
	if ((parport[0] == -1 && (!timid || !port->devices)) ||
	    plip_searchfor(parport, port->number)) {
		if (i == PLIP_MAX) {
			printk(KERN_ERR "plip: too many devices\n");
			return;
		}
		dev_plip[i] = kmalloc(sizeof(struct net_device),
				      GFP_KERNEL);
		if (!dev_plip[i]) {
			printk(KERN_ERR "plip: memory squeeze\n");
			return;
		}
		memset(dev_plip[i], 0, sizeof(struct net_device));
		sprintf(dev_plip[i]->name, "plip%d", i);
		/* priv temporarily holds the port; presumably replaced
		   by plip_init_dev() with the net_local -- cleanup in
		   plip_cleanup_module() kfree()s priv, which supports
		   that. TODO confirm against plip_init_dev. */
		dev_plip[i]->priv = port;
		if (plip_init_dev(dev_plip[i],port) ||
		    register_netdev(dev_plip[i])) {
			/* NOTE(review): if register_netdev() fails after
			   plip_init_dev() succeeded, whatever init
			   allocated (priv, pardev) looks leaked here --
			   verify against plip_init_dev. */
			kfree(dev_plip[i]);
			dev_plip[i] = NULL;
		} else {
			i++;
		}
	}
}
1347
/* plip_detach() is called (by the parport code) when a port is
 * no longer available to use.  plip keeps its devices registered
 * until module unload, so there is nothing to do here. */
static void plip_detach (struct parport *port)
{
	/* Nothing to do */
}
1354
/* Callbacks registered with the parport layer: invoked as parallel
   ports appear and disappear. */
static struct parport_driver plip_driver = {
	name:	"plip",
	attach:	plip_attach,
	detach:	plip_detach
};
1360
plip_cleanup_module(void)1361 static void __exit plip_cleanup_module (void)
1362 {
1363 int i;
1364
1365 parport_unregister_driver (&plip_driver);
1366
1367 for (i=0; i < PLIP_MAX; i++) {
1368 if (dev_plip[i]) {
1369 struct net_local *nl =
1370 (struct net_local *)dev_plip[i]->priv;
1371 unregister_netdev(dev_plip[i]);
1372 if (nl->port_owner)
1373 parport_release(nl->pardev);
1374 parport_unregister_device(nl->pardev);
1375 kfree(dev_plip[i]->priv);
1376 kfree(dev_plip[i]);
1377 dev_plip[i] = NULL;
1378 }
1379 }
1380 }
1381
#ifndef MODULE

/* Next free slot in parport[] filled by "plip=parportN" options. */
static int parport_ptr;

/* Parse the "plip=" kernel boot option. Accepted forms:
   "plip=parportN" adds port N to the list, "plip=timid" sets timid
   mode, "plip=" or "plip=0" disables the driver; anything else is
   reported and ignored.  Always returns 1 (option consumed). */
static int __init plip_setup(char *str)
{
	int ints[4];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	/* Ugh. */
	if (!strncmp(str, "parport", 7)) {
		int n = simple_strtoul(str+7, NULL, 10);
		if (parport_ptr < PLIP_MAX)
			parport[parport_ptr++] = n;
		else
			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
			       str);
	} else if (!strcmp(str, "timid")) {
		timid = 1;
	} else {
		if (ints[0] == 0 || ints[1] == 0) {
			/* disable driver on "plip=" or "plip=0" */
			parport[0] = -2;
		} else {
			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
			       ints[1]);
		}
	}
	return 1;
}

__setup("plip=", plip_setup);

#endif /* !MODULE */
1417
plip_init(void)1418 static int __init plip_init (void)
1419 {
1420 if (parport[0] == -2)
1421 return 0;
1422
1423 if (parport[0] != -1 && timid) {
1424 printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1425 timid = 0;
1426 }
1427
1428 if (parport_register_driver (&plip_driver)) {
1429 printk (KERN_WARNING "plip: couldn't register driver\n");
1430 return 1;
1431 }
1432
1433 return 0;
1434 }
1435
1436 module_init(plip_init);
1437 module_exit(plip_cleanup_module);
1438 MODULE_LICENSE("GPL");
1439
1440 /*
1441 * Local variables:
1442 * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"
1443 * End:
1444 */
1445