1 /* $Id: parport_share.c,v 1.15 1998/01/11 12:06:17 philip Exp $
2  * Parallel-port resource manager code.
3  *
4  * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
5  *          Tim Waugh <tim@cyberelk.demon.co.uk>
6  *          Jose Renau <renau@acm.org>
7  *          Philip Blundell <philb@gnu.org>
8  *	    Andrea Arcangeli
9  *
10  * based on work by Grant Guenther <grant@torque.net>
11  *          and Philip Blundell
12  *
13  * Any part of this program may be used in documents licensed under
14  * the GNU Free Documentation License, Version 1.1 or any later version
15  * published by the Free Software Foundation.
16  */
17 
18 #undef PARPORT_DEBUG_SHARING		/* undef for production */
19 
20 #include <linux/config.h>
21 #include <linux/module.h>
22 #include <linux/string.h>
23 #include <linux/threads.h>
24 #include <linux/parport.h>
25 #include <linux/delay.h>
26 #include <linux/errno.h>
27 #include <linux/interrupt.h>
28 #include <linux/ioport.h>
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/sched.h>
32 #include <linux/kmod.h>
33 
34 #include <linux/spinlock.h>
35 #include <asm/irq.h>
36 
37 #undef PARPORT_PARANOID
38 
#define PARPORT_DEFAULT_TIMESLICE	(HZ/5)

/* Defaults copied into each new pardevice/parport; may be overridden
 * per device (timeslice) or per port (spintime). */
unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime =  DEFAULT_SPIN_TIME;

/* List of all ports in the system, guarded by parportlist_lock.
 * portlist_tail exists only for O(1) appends in
 * parport_register_port; readers use portlist alone. */
static struct parport *portlist = NULL, *portlist_tail = NULL;
static spinlock_t parportlist_lock = SPIN_LOCK_UNLOCKED;

/* Singly-linked list of registered device drivers, guarded by
 * driverlist_lock. */
static struct parport_driver *driver_chain = NULL;
static spinlock_t driverlist_lock = SPIN_LOCK_UNLOCKED;
49 
50 /* What you can do to a port that's gone away.. */
/* Stand-in operations for a port whose lowlevel driver has gone away:
 * writes and state changes are silently discarded, reads return 0,
 * and transfers report zero bytes moved. */
static void dead_write_lines (struct parport *p, unsigned char b) { }
static unsigned char dead_read_lines (struct parport *p) { return 0; }
static unsigned char dead_frob_lines (struct parport *p, unsigned char b,
				      unsigned char c) { return 0; }
static void dead_onearg (struct parport *p) { }
static void dead_initstate (struct pardevice *d, struct parport_state *s) { }
static void dead_state (struct parport *p, struct parport_state *s) { }
static void dead_noargs (void) { }
static size_t dead_write (struct parport *p, const void *b, size_t l, int f)
{
	return 0;
}
static size_t dead_read (struct parport *p, void *b, size_t l, int f)
{
	return 0;
}
/* Operations table installed by parport_unregister_port().  Every
 * slot points at one of the do-nothing stubs above, so device drivers
 * that still hold a pointer to a departed port make harmless no-op
 * calls instead of calling into a lowlevel driver that may have been
 * unloaded.  The initializer is positional; the comments indicate the
 * rough grouping of the operation slots. */
static struct parport_operations dead_ops = {
	dead_write_lines,	/* data */
	dead_read_lines,
	dead_write_lines,	/* control */
	dead_read_lines,
	dead_frob_lines,
	dead_read_lines,	/* status */
	dead_onearg,		/* enable_irq */
	dead_onearg,		/* disable_irq */
	dead_onearg,		/* data_forward */
	dead_onearg,		/* data_reverse */
	dead_initstate,		/* init_state */
	dead_state,
	dead_state,
	dead_noargs,		/* xxx_use_count */
	dead_noargs,
	dead_write,		/* epp */
	dead_read,
	dead_write,
	dead_read,
	dead_write,		/* ecp */
	dead_read,
	dead_write,
	dead_write,		/* compat */
	dead_read,		/* nibble */
	dead_read		/* byte */
};
90 
91 /* Call attach(port) for each registered driver. */
attach_driver_chain(struct parport * port)92 static void attach_driver_chain(struct parport *port)
93 {
94 	struct parport_driver *drv;
95 	void (**attach) (struct parport *);
96 	int count = 0, i;
97 
98 	/* This is complicated because attach() must be able to block,
99 	 * but we can't let it do that while we're holding a
100 	 * spinlock. */
101 
102 	spin_lock (&driverlist_lock);
103 	for (drv = driver_chain; drv; drv = drv->next)
104 		count++;
105 	spin_unlock (&driverlist_lock);
106 
107 	/* Drivers can unregister here; that's okay.  If they register
108 	 * they'll be given an attach during parport_register_driver,
109 	 * so that's okay too.  The only worry is that someone might
110 	 * get given an attach twice if they registered just before
111 	 * this function gets called. */
112 
113 	/* Hmm, this could be fixed with a generation number..
114 	 * FIXME */
115 
116 	attach = kmalloc (sizeof (void(*)(struct parport *)) * count,
117 			  GFP_KERNEL);
118 	if (!attach) {
119 		printk (KERN_WARNING "parport: not enough memory to attach\n");
120 		return;
121 	}
122 
123 	spin_lock (&driverlist_lock);
124 	for (i = 0, drv = driver_chain; drv && i < count; drv = drv->next)
125 		attach[i++] = drv->attach;
126 	spin_unlock (&driverlist_lock);
127 
128 	for (count = 0; count < i; count++)
129 		(*attach[count]) (port);
130 
131 	kfree (attach);
132 }
133 
134 /* Call detach(port) for each registered driver. */
detach_driver_chain(struct parport * port)135 static void detach_driver_chain(struct parport *port)
136 {
137 	struct parport_driver *drv;
138 
139 	spin_lock (&driverlist_lock);
140 	for (drv = driver_chain; drv; drv = drv->next)
141 		drv->detach (port);
142 	spin_unlock (&driverlist_lock);
143 }
144 
145 /* Ask kmod for some lowlevel drivers. */
/* Ask kmod to load a lowlevel port driver for us.  There is no real
 * module with this name: the administrator is expected to provide a
 * modutils alias mapping "parport_lowlevel" onto the appropriate
 * hardware driver. */
static void get_lowlevel_driver (void)
{
	request_module ("parport_lowlevel");
}
152 
153 /**
154  *	parport_register_driver - register a parallel port device driver
155  *	@drv: structure describing the driver
156  *
157  *	This can be called by a parallel port device driver in order
158  *	to receive notifications about ports being found in the
159  *	system, as well as ports no longer available.
160  *
161  *	The @drv structure is allocated by the caller and must not be
162  *	deallocated until after calling parport_unregister_driver().
163  *
164  *	The driver's attach() function may block.  The port that
165  *	attach() is given will be valid for the duration of the
166  *	callback, but if the driver wants to take a copy of the
167  *	pointer it must call parport_get_port() to do so.  Calling
168  *	parport_register_device() on that port will do this for you.
169  *
170  *	The driver's detach() function may not block.  The port that
171  *	detach() is given will be valid for the duration of the
172  *	callback, but if the driver wants to take a copy of the
173  *	pointer it must call parport_get_port() to do so.
174  *
175  *	Returns 0 on success.  Currently it always succeeds.
176  **/
177 
parport_register_driver(struct parport_driver * drv)178 int parport_register_driver (struct parport_driver *drv)
179 {
180 	struct parport *port;
181 	struct parport **ports;
182 	int count = 0, i;
183 
184 	if (!portlist)
185 		get_lowlevel_driver ();
186 
187 	/* We have to take the portlist lock for this to be sure
188 	 * that port is valid for the duration of the callback. */
189 
190 	/* This is complicated by the fact that attach must be allowed
191 	 * to block, so we can't be holding any spinlocks when we call
192 	 * it.  But we need to hold a spinlock to iterate over the
193 	 * list of ports.. */
194 
195 	spin_lock (&parportlist_lock);
196 	for (port = portlist; port; port = port->next)
197 		count++;
198 	spin_unlock (&parportlist_lock);
199 
200 	ports = kmalloc (sizeof (struct parport *) * count, GFP_KERNEL);
201 	if (!ports)
202 		printk (KERN_WARNING "parport: not enough memory to attach\n");
203 	else {
204 		spin_lock (&parportlist_lock);
205 		for (i = 0, port = portlist; port && i < count;
206 		     port = port->next)
207 			ports[i++] = port;
208 		spin_unlock (&parportlist_lock);
209 
210 		for (count = 0; count < i; count++)
211 			drv->attach (ports[count]);
212 
213 		kfree (ports);
214 	}
215 
216 	spin_lock (&driverlist_lock);
217 	drv->next = driver_chain;
218 	driver_chain = drv;
219 	spin_unlock (&driverlist_lock);
220 
221 	return 0;
222 }
223 
224 /**
225  *	parport_unregister_driver - deregister a parallel port device driver
226  *	@arg: structure describing the driver that was given to
227  *	      parport_register_driver()
228  *
229  *	This should be called by a parallel port device driver that
230  *	has registered itself using parport_register_driver() when it
231  *	is about to be unloaded.
232  *
233  *	When it returns, the driver's attach() routine will no longer
234  *	be called, and for each port that attach() was called for, the
235  *	detach() routine will have been called.
236  *
237  *	If the caller's attach() function can block, it is their
238  *	responsibility to make sure to wait for it to exit before
239  *	unloading.
240  *
241  *	All the driver's detach() calls are guaranteed to have
242  *	finished by the time this function returns.
243  *
244  *	The driver's detach() call is not allowed to block.
245  **/
246 
parport_unregister_driver(struct parport_driver * arg)247 void parport_unregister_driver (struct parport_driver *arg)
248 {
249 	struct parport_driver *drv = driver_chain, *olddrv = NULL;
250 
251 	while (drv) {
252 		if (drv == arg) {
253 			struct parport *port;
254 
255 			spin_lock (&driverlist_lock);
256 			if (olddrv)
257 				olddrv->next = drv->next;
258 			else
259 				driver_chain = drv->next;
260 			spin_unlock (&driverlist_lock);
261 
262 			/* Call the driver's detach routine for each
263 			 * port to clean up any resources that the
264 			 * attach routine acquired. */
265 			spin_lock (&parportlist_lock);
266 			for (port = portlist; port; port = port->next)
267 				drv->detach (port);
268 			spin_unlock (&parportlist_lock);
269 
270 			return;
271 		}
272 		olddrv = drv;
273 		drv = drv->next;
274 	}
275 }
276 
free_port(struct parport * port)277 static void free_port (struct parport *port)
278 {
279 	int d;
280 	for (d = 0; d < 5; d++) {
281 		if (port->probe_info[d].class_name)
282 			kfree (port->probe_info[d].class_name);
283 		if (port->probe_info[d].mfr)
284 			kfree (port->probe_info[d].mfr);
285 		if (port->probe_info[d].model)
286 			kfree (port->probe_info[d].model);
287 		if (port->probe_info[d].cmdset)
288 			kfree (port->probe_info[d].cmdset);
289 		if (port->probe_info[d].description)
290 			kfree (port->probe_info[d].description);
291 	}
292 
293 	kfree(port->name);
294 	kfree(port);
295 }
296 
297 /**
298  *	parport_get_port - increment a port's reference count
299  *	@port: the port
300  *
 *	This ensures that a struct parport pointer remains valid
302  *	until the matching parport_put_port() call.
303  **/
304 
parport_get_port(struct parport * port)305 struct parport *parport_get_port (struct parport *port)
306 {
307 	atomic_inc (&port->ref_count);
308 	return port;
309 }
310 
311 /**
312  *	parport_put_port - decrement a port's reference count
313  *	@port: the port
314  *
315  *	This should be called once for each call to parport_get_port(),
316  *	once the port is no longer needed.
317  **/
318 
parport_put_port(struct parport * port)319 void parport_put_port (struct parport *port)
320 {
321 	if (atomic_dec_and_test (&port->ref_count))
322 		/* Can destroy it now. */
323 		free_port (port);
324 
325 	return;
326 }
327 
328 /**
329  *	parport_enumerate - return a list of the system's parallel ports
330  *
331  *	This returns the head of the list of parallel ports in the
332  *	system, as a &struct parport.  The structure that is returned
333  *	describes the first port in the list, and its 'next' member
334  *	points to the next port, or %NULL if it's the last port.
335  *
336  *	If there are no parallel ports in the system,
337  *	parport_enumerate() will return %NULL.
338  **/
339 
parport_enumerate(void)340 struct parport *parport_enumerate(void)
341 {
342 	/* Don't use this: use parport_register_driver instead. */
343 
344 	if (!portlist)
345 		get_lowlevel_driver ();
346 
347 	return portlist;
348 }
349 
350 /**
351  *	parport_register_port - register a parallel port
352  *	@base: base I/O address
353  *	@irq: IRQ line
354  *	@dma: DMA channel
355  *	@ops: pointer to the port driver's port operations structure
356  *
357  *	When a parallel port (lowlevel) driver finds a port that
358  *	should be made available to parallel port device drivers, it
359  *	should call parport_register_port().  The @base, @irq, and
360  *	@dma parameters are for the convenience of port drivers, and
361  *	for ports where they aren't meaningful needn't be set to
362  *	anything special.  They can be altered afterwards by adjusting
363  *	the relevant members of the parport structure that is returned
364  *	and represents the port.  They should not be tampered with
365  *	after calling parport_announce_port, however.
366  *
367  *	If there are parallel port device drivers in the system that
368  *	have registered themselves using parport_register_driver(),
369  *	they are not told about the port at this time; that is done by
370  *	parport_announce_port().
371  *
372  *	The @ops structure is allocated by the caller, and must not be
373  *	deallocated before calling parport_unregister_port().
374  *
375  *	If there is no memory to allocate a new parport structure,
376  *	this function will return %NULL.
377  **/
378 
struct parport *parport_register_port(unsigned long base, int irq, int dma,
				      struct parport_operations *ops)
{
	struct parport *tmp;
	int portnum;
	int device;
	char *name;

	tmp = kmalloc(sizeof(struct parport), GFP_KERNEL);
	if (!tmp) {
		printk(KERN_WARNING "parport: memory squeeze\n");
		return NULL;
	}

	/* Search for the lowest free parport number. */

	/* NOTE(review): the lock is dropped before the new port is
	 * linked into the list further down, so two concurrent
	 * registrations could in principle pick the same number --
	 * confirm registration is serialised by the callers.  Also,
	 * every other parportlist_lock user in this file takes the
	 * plain spin_lock(); the _irq variant here looks
	 * inconsistent -- verify which is intended. */
	spin_lock_irq (&parportlist_lock);
	for (portnum = 0; ; portnum++) {
		struct parport *itr = portlist;
		while (itr) {
			if (itr->number == portnum)
				/* No good, already used. */
				break;
			else
				itr = itr->next;
		}

		if (itr == NULL)
			/* Got to the end of the list: portnum is free. */
			break;
	}
	spin_unlock_irq (&parportlist_lock);

	/* Init our structure */
 	memset(tmp, 0, sizeof(struct parport));
	tmp->base = base;
	tmp->irq = irq;
	tmp->dma = dma;
	/* Not multiplexed and not on a daisy chain until proven
	 * otherwise. */
	tmp->muxport = tmp->daisy = tmp->muxsel = -1;
	tmp->modes = 0;
 	tmp->next = NULL;
	tmp->devices = tmp->cad = NULL;
	tmp->flags = 0;
	tmp->ops = ops;
	tmp->portnum = tmp->number = portnum;
	tmp->physport = tmp;	/* we are our own physical port */
	memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info));
	tmp->cad_lock = RW_LOCK_UNLOCKED;
	spin_lock_init(&tmp->waitlist_lock);
	spin_lock_init(&tmp->pardevice_lock);
	tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
	tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
	init_MUTEX_LOCKED (&tmp->ieee1284.irq); /* actually a semaphore at 0 */
	tmp->spintime = parport_default_spintime;
	/* The initial reference belongs to the caller and is dropped
	 * by parport_unregister_port. */
	atomic_set (&tmp->ref_count, 1);

	/* 15 bytes: "parport" + up to 7 digits + NUL. */
	name = kmalloc(15, GFP_KERNEL);
	if (!name) {
		printk(KERN_ERR "parport: memory squeeze\n");
		kfree(tmp);
		return NULL;
	}
	sprintf(name, "parport%d", portnum);
	tmp->name = name;

	/*
	 * Chain the entry to our list.
	 *
	 * This function must not run from an irq handler so we don't need
	 * to clear irq on the local CPU. -arca
	 */

	spin_lock(&parportlist_lock);

	/* We are locked against anyone else performing alterations, but
	 * because of parport_enumerate people can still _read_ the list
	 * while we are changing it; so be careful..
	 *
	 * It's okay to have portlist_tail a little bit out of sync
	 * since it's only used for changing the list, not for reading
	 * from it.
	 */

	if (portlist_tail)
		portlist_tail->next = tmp;
	portlist_tail = tmp;
	if (!portlist)
		portlist = tmp;
	spin_unlock(&parportlist_lock);

	for (device = 0; device < 5; device++)
		/* assume the worst */
		tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;

	tmp->waithead = tmp->waittail = NULL;

	return tmp;
}
477 
478 /**
479  *	parport_announce_port - tell device drivers about a parallel port
480  *	@port: parallel port to announce
481  *
482  *	After a port driver has registered a parallel port with
483  *	parport_register_port, and performed any necessary
484  *	initialisation or adjustments, it should call
485  *	parport_announce_port() in order to notify all device drivers
486  *	that have called parport_register_driver().  Their attach()
487  *	functions will be called, with @port as the parameter.
488  **/
489 
void parport_announce_port (struct parport *port)
{
#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	if (parport_daisy_init (port) == 0) {
		/* No devices were detected.  Perhaps they are in some
                   funny state; let's try to reset them and see if
                   they wake up.  The reset is done by pulsing the
                   control lines (SELECT, then SELECT|INIT) with 50us
                   settling delays, and then re-probing. */
		parport_daisy_fini (port);
		parport_write_control (port, PARPORT_CONTROL_SELECT);
		udelay (50);
		parport_write_control (port,
				       PARPORT_CONTROL_SELECT |
				       PARPORT_CONTROL_INIT);
		udelay (50);
		parport_daisy_init (port);
	}
#endif

	/* Let drivers know that a new port has arrived.  This calls
	 * every registered driver's attach(), which may block. */
	attach_driver_chain (port);
}
512 
513 /**
514  *	parport_unregister_port - deregister a parallel port
515  *	@port: parallel port to deregister
516  *
517  *	When a parallel port driver is forcibly unloaded, or a
518  *	parallel port becomes inaccessible, the port driver must call
519  *	this function in order to deal with device drivers that still
520  *	want to use it.
521  *
522  *	The parport structure associated with the port has its
523  *	operations structure replaced with one containing 'null'
524  *	operations that return errors or just don't do anything.
525  *
526  *	Any drivers that have registered themselves using
527  *	parport_register_driver() are notified that the port is no
528  *	longer accessible by having their detach() routines called
529  *	with @port as the parameter.
530  **/
531 
void parport_unregister_port(struct parport *port)
{
	struct parport *p;

	/* From now on, any operation on this port hits the harmless
	 * dead_ops stubs instead of the (possibly unloading)
	 * lowlevel driver. */
	port->ops = &dead_ops;

	/* Spread the word: call every driver's detach(). */
	detach_driver_chain (port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini (port);
#endif

	/* Unlink the port from the global list. */
	spin_lock(&parportlist_lock);

	/* We are protected from other people changing the list, but
	 * they can still see it (using parport_enumerate).  So be
	 * careful about the order of writes.. */
	if (portlist == port) {
		/* Head of the list: advance the head, and clear the
		 * tail as well if the list is now empty. */
		if ((portlist = port->next) == NULL)
			portlist_tail = NULL;
	} else {
		/* Find the predecessor of 'port' and splice it out. */
		for (p = portlist; (p != NULL) && (p->next != port);
		     p=p->next);
		if (p) {
			if ((p->next = port->next) == NULL)
				portlist_tail = p;
		}
		else printk (KERN_WARNING
			     "%s not found in port list!\n", port->name);
	}
	spin_unlock(&parportlist_lock);

	/* Yes, parport_enumerate _is_ unsafe.  Don't use it. */
	/* Drop the initial reference taken in parport_register_port;
	 * the structure is only freed once every other holder has
	 * also called parport_put_port. */
	parport_put_port (port);
}
569 
570 /**
571  *	parport_register_device - register a device on a parallel port
572  *	@port: port to which the device is attached
573  *	@name: a name to refer to the device
574  *	@pf: preemption callback
575  *	@kf: kick callback (wake-up)
576  *	@irq_func: interrupt handler
577  *	@flags: registration flags
578  *	@handle: data for callback functions
579  *
580  *	This function, called by parallel port device drivers,
581  *	declares that a device is connected to a port, and tells the
582  *	system all it needs to know.
583  *
584  *	The @name is allocated by the caller and must not be
585  *	deallocated until the caller calls @parport_unregister_device
586  *	for that device.
587  *
588  *	The preemption callback function, @pf, is called when this
589  *	device driver has claimed access to the port but another
590  *	device driver wants to use it.  It is given @handle as its
591  *	parameter, and should return zero if it is willing for the
592  *	system to release the port to another driver on its behalf.
593  *	If it wants to keep control of the port it should return
594  *	non-zero, and no action will be taken.  It is good manners for
595  *	the driver to try to release the port at the earliest
596  *	opportunity after its preemption callback rejects a preemption
597  *	attempt.  Note that if a preemption callback is happy for
598  *	preemption to go ahead, there is no need to release the port;
599  *	it is done automatically.  This function may not block, as it
600  *	may be called from interrupt context.  If the device driver
601  *	does not support preemption, @pf can be %NULL.
602  *
603  *	The wake-up ("kick") callback function, @kf, is called when
604  *	the port is available to be claimed for exclusive access; that
605  *	is, parport_claim() is guaranteed to succeed when called from
606  *	inside the wake-up callback function.  If the driver wants to
607  *	claim the port it should do so; otherwise, it need not take
608  *	any action.  This function may not block, as it may be called
609  *	from interrupt context.  If the device driver does not want to
610  *	be explicitly invited to claim the port in this way, @kf can
611  *	be %NULL.
612  *
613  *	The interrupt handler, @irq_func, is called when an interrupt
614  *	arrives from the parallel port.  Note that if a device driver
615  *	wants to use interrupts it should use parport_enable_irq(),
616  *	and can also check the irq member of the parport structure
617  *	representing the port.
618  *
619  *	The parallel port (lowlevel) driver is the one that has called
620  *	request_irq() and whose interrupt handler is called first.
621  *	This handler does whatever needs to be done to the hardware to
622  *	acknowledge the interrupt (for PC-style ports there is nothing
623  *	special to be done).  It then tells the IEEE 1284 code about
624  *	the interrupt, which may involve reacting to an IEEE 1284
625  *	event depending on the current IEEE 1284 phase.  After this,
626  *	it calls @irq_func.  Needless to say, @irq_func will be called
627  *	from interrupt context, and may not block.
628  *
629  *	The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
630  *	so should only be used when sharing the port with other device
631  *	drivers is impossible and would lead to incorrect behaviour.
632  *	Use it sparingly!  Normally, @flags will be zero.
633  *
634  *	This function returns a pointer to a structure that represents
635  *	the device on the port, or %NULL if there is not enough memory
636  *	to allocate space for that structure.
637  **/
638 
639 struct pardevice *
parport_register_device(struct parport * port,const char * name,int (* pf)(void *),void (* kf)(void *),void (* irq_func)(int,void *,struct pt_regs *),int flags,void * handle)640 parport_register_device(struct parport *port, const char *name,
641 			int (*pf)(void *), void (*kf)(void *),
642 			void (*irq_func)(int, void *, struct pt_regs *),
643 			int flags, void *handle)
644 {
645 	struct pardevice *tmp;
646 
647 	if (port->physport->flags & PARPORT_FLAG_EXCL) {
648 		/* An exclusive device is registered. */
649 		printk (KERN_DEBUG "%s: no more devices allowed\n",
650 			port->name);
651 		return NULL;
652 	}
653 
654 	if (flags & PARPORT_DEV_LURK) {
655 		if (!pf || !kf) {
656 			printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
657 			return NULL;
658 		}
659 	}
660 
661 	/* We up our own module reference count, and that of the port
662            on which a device is to be registered, to ensure that
663            neither of us gets unloaded while we sleep in (e.g.)
664            kmalloc.  To be absolutely safe, we have to require that
665            our caller doesn't sleep in between parport_enumerate and
666            parport_register_device.. */
667 	inc_parport_count();
668 	port->ops->inc_use_count();
669 	parport_get_port (port);
670 
671 	tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
672 	if (tmp == NULL) {
673 		printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
674 		goto out;
675 	}
676 
677 	tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
678 	if (tmp->state == NULL) {
679 		printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
680 		goto out_free_pardevice;
681 	}
682 
683 	tmp->name = name;
684 	tmp->port = port;
685 	tmp->daisy = -1;
686 	tmp->preempt = pf;
687 	tmp->wakeup = kf;
688 	tmp->private = handle;
689 	tmp->flags = flags;
690 	tmp->irq_func = irq_func;
691 	tmp->waiting = 0;
692 	tmp->timeout = 5 * HZ;
693 
694 	/* Chain this onto the list */
695 	tmp->prev = NULL;
696 	/*
697 	 * This function must not run from an irq handler so we don' t need
698 	 * to clear irq on the local CPU. -arca
699 	 */
700 	spin_lock(&port->physport->pardevice_lock);
701 
702 	if (flags & PARPORT_DEV_EXCL) {
703 		if (port->physport->devices) {
704 			spin_unlock (&port->physport->pardevice_lock);
705 			printk (KERN_DEBUG
706 				"%s: cannot grant exclusive access for "
707 				"device %s\n", port->name, name);
708 			goto out_free_all;
709 		}
710 		port->flags |= PARPORT_FLAG_EXCL;
711 	}
712 
713 	tmp->next = port->physport->devices;
714 	wmb(); /* Make sure that tmp->next is written before it's
715                   added to the list; see comments marked 'no locking
716                   required' */
717 	if (port->physport->devices)
718 		port->physport->devices->prev = tmp;
719 	port->physport->devices = tmp;
720 	spin_unlock(&port->physport->pardevice_lock);
721 
722 	init_waitqueue_head(&tmp->wait_q);
723 	tmp->timeslice = parport_default_timeslice;
724 	tmp->waitnext = tmp->waitprev = NULL;
725 
726 	/*
727 	 * This has to be run as last thing since init_state may need other
728 	 * pardevice fields. -arca
729 	 */
730 	port->ops->init_state(tmp, tmp->state);
731 	parport_device_proc_register(tmp);
732 	return tmp;
733 
734  out_free_all:
735 	kfree (tmp->state);
736  out_free_pardevice:
737 	kfree (tmp);
738  out:
739 	dec_parport_count();
740 	port->ops->dec_use_count();
741 	parport_put_port (port);
742 	return NULL;
743 }
744 
745 /**
746  *	parport_unregister_device - deregister a device on a parallel port
747  *	@dev: pointer to structure representing device
748  *
749  *	This undoes the effect of parport_register_device().
750  **/
751 
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (dev == NULL) {
		printk(KERN_ERR "parport_unregister_device: passed NULL\n");
		return;
	}
#endif

	parport_device_proc_unregister(dev);

	/* All device bookkeeping lives on the physical port. */
	port = dev->port->physport;

	/* A driver should release the port before unregistering its
	 * device; do it on its behalf if it forgot. */
	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release (dev);
	}

	/* Unlink from the port's device list. */
	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	/* If this was the exclusive device, the port becomes
	 * shareable again. */
	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/* Make sure we haven't left any pointers around in the wait
	 * list. */
	spin_lock (&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock (&port->waitlist_lock);

	kfree(dev->state);
	kfree(dev);

	/* Balance the counts taken in parport_register_device. */
	dec_parport_count();
	port->ops->dec_use_count();
	parport_put_port (port);

	/* Yes, that's right, someone _could_ still have a pointer to
	 * port, if they used parport_enumerate.  That's why they
	 * shouldn't use it (and use parport_register_driver instead)..
	 */
}
813 
814 /**
815  *	parport_find_number - find a parallel port by number
816  *	@number: parallel port number
817  *
818  *	This returns the parallel port with the specified number, or
819  *	%NULL if there is none.
820  *
821  *	There is an implicit parport_get_port() done already; to throw
822  *	away the reference to the port that parport_find_number()
823  *	gives you, use parport_put_port().
824  */
825 
parport_find_number(int number)826 struct parport *parport_find_number (int number)
827 {
828 	struct parport *port, *result = NULL;
829 
830 	if (!portlist)
831 		get_lowlevel_driver ();
832 
833 	spin_lock (&parportlist_lock);
834 	for (port = portlist; port; port = port->next)
835 		if (port->number == number) {
836 			result = parport_get_port (port);
837 			break;
838 		}
839 	spin_unlock (&parportlist_lock);
840 	return result;
841 }
842 
843 /**
844  *	parport_find_base - find a parallel port by base address
845  *	@base: base I/O address
846  *
847  *	This returns the parallel port with the specified base
848  *	address, or %NULL if there is none.
849  *
850  *	There is an implicit parport_get_port() done already; to throw
851  *	away the reference to the port that parport_find_base()
852  *	gives you, use parport_put_port().
853  */
854 
parport_find_base(unsigned long base)855 struct parport *parport_find_base (unsigned long base)
856 {
857 	struct parport *port, *result = NULL;
858 
859 	if (!portlist)
860 		get_lowlevel_driver ();
861 
862 	spin_lock (&parportlist_lock);
863 	for (port = portlist; port; port = port->next)
864 		if (port->base == base) {
865 			result = parport_get_port (port);
866 			break;
867 		}
868 	spin_unlock (&parportlist_lock);
869 	return result;
870 }
871 
872 /**
873  *	parport_claim - claim access to a parallel port device
874  *	@dev: pointer to structure representing a device on the port
875  *
876  *	This function will not block and so can be used from interrupt
877  *	context.  If parport_claim() succeeds in claiming access to
878  *	the port it returns zero and the port is available to use.  It
879  *	may fail (returning non-zero) if the port is in use by another
880  *	driver and that driver is not willing to relinquish control of
881  *	the port.
882  **/
883 
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	/* Claiming a port you already own succeeds trivially. */
	if (port->cad == dev) {
		printk(KERN_INFO "%s: %s already owner\n",
		       dev->port->name,dev->name);
		return 0;
	}

	/* Preempt any current device */
	write_lock_irqsave (&port->cad_lock, flags);
	if ((oldcad = port->cad) != NULL) {
		/* The owner must have a preempt callback that agrees
		 * (returns 0); otherwise the claim fails. */
		if (oldcad->preempt) {
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/* I think we'll actually deadlock rather than
                           get here, but just in case.. */
			printk(KERN_WARNING
			       "%s: %s released port when preempted!\n",
			       port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer waiting.
	 * (Bit 0 of dev->waiting means "on the port's wait list"; see
	 * the blocked: path below.) */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again.  */
		spin_lock_irq (&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq (&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select (port, dev->daisy,
					   IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/* If this is the first time we tried to claim the port, register an
	   interest.  This is only allowed for devices sleeping in
	   parport_claim_or_block() (bit 1 of dev->waiting), or those
	   with a wakeup function.  */

	/* The cad_lock is still held for writing here */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock (&port->waitlist_lock);
		/* Bit 0 set means we are already queued; only enqueue
		 * once. */
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock (&port->waitlist_lock);
	}
	write_unlock_irqrestore (&port->cad_lock, flags);
	return -EAGAIN;
}
983 
984 /**
985  *	parport_claim_or_block - claim access to a parallel port device
986  *	@dev: pointer to structure representing a device on the port
987  *
988  *	This behaves like parport_claim(), but will block if necessary
989  *	to wait for the port to be free.  A return value of 1
990  *	indicates that it slept; 0 means that it succeeded without
991  *	needing to sleep.  A negative error code indicates failure.
992  **/
993 
int parport_claim_or_block(struct pardevice *dev)
{
	int r;

	/* Signal to parport_claim() that we can wait even without a
	   wakeup function.  */
	dev->waiting = 2;

	/* Try to claim the port.  If this fails, we need to sleep.  */
	r = parport_claim(dev);
	if (r == -EAGAIN) {
		unsigned long flags;
#ifdef PARPORT_DEBUG_SHARING
		printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
#endif
		/* NOTE(review): save_flags()/cli() is the legacy global
		   interrupt-disable interface — it closes the race between
		   checking dev->waiting and sleeping, but only on UP;
		   modern kernels replaced this pattern. */
		save_flags (flags);
		cli();
		/* If dev->waiting is clear now, an interrupt
		   gave us the port and we would deadlock if we slept.  */
		if (dev->waiting) {
			interruptible_sleep_on (&dev->wait_q);
			/* Woken by a signal rather than by being handed
			   the port: report the interruption. */
			if (signal_pending (current)) {
				restore_flags (flags);
				return -EINTR;
			}
			/* 1 == "we had to sleep", per the kernel-doc above. */
			r = 1;
		} else {
			/* Port was handed to us before we could sleep. */
			r = 0;
#ifdef PARPORT_DEBUG_SHARING
			printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
			       dev->name);
#endif
		}
		restore_flags(flags);
#ifdef PARPORT_DEBUG_SHARING
		if (dev->port->physport->cad != dev)
			printk(KERN_DEBUG "%s: exiting parport_claim_or_block "
			       "but %s owns port!\n", dev->name,
			       dev->port->physport->cad ?
			       dev->port->physport->cad->name:"nobody");
#endif
	}
	/* Clear the "may block" flag set at entry. */
	dev->waiting = 0;
	return r;
}
1039 
1040 /**
1041  *	parport_release - give up access to a parallel port device
1042  *	@dev: pointer to structure representing parallel port device
1043  *
1044  *	This function cannot fail, but it should not be called without
1045  *	the port claimed.  Similarly, if the port is already claimed
1046  *	you should not try claiming it again.
1047  **/
1048 
void parport_release(struct pardevice *dev)
{
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore (&port->cad_lock, flags);
		printk(KERN_WARNING "%s: %s tried to release parport "
		       "when not owner\n", port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all (port);
		port->daisy = -1;
	}
#endif

	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers */
	port->ops->save_state(port, dev->state);

	/* If anybody is waiting, find out who's been there longest and
	   then wake them up. (Note: no locking required) */
	/* !!! LOCKING IS NEEDED HERE */
	/* Walk the FIFO wait list head-first so the longest waiter wins. */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) { /* sleeping in claim_or_block */
			/* Claim on the sleeper's behalf, then wake it. */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			/* Let the device claim the port itself from its
			   wakeup callback; stop as soon as someone does. */
			pd->wakeup(pd->private);
			if (dev->port->cad) /* racy but no matter */
				return;
		} else {
			printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
		}
	}

	/* Nobody was waiting, so walk the list to see if anyone is
	   interested in being woken up. (Note: no locking required) */
	/* !!! LOCKING IS NEEDED HERE */
	/* Offer the port to every other registered device until one of
	   their wakeup callbacks claims it (port->cad becomes non-NULL). */
	for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
1110 
parport_parse_params(int nports,const char * str[],int val[],int automatic,int none,int nofifo)1111 static int parport_parse_params (int nports, const char *str[], int val[],
1112 				 int automatic, int none, int nofifo)
1113 {
1114 	unsigned int i;
1115 	for (i = 0; i < nports && str[i]; i++) {
1116 		if (!strncmp(str[i], "auto", 4))
1117 			val[i] = automatic;
1118 		else if (!strncmp(str[i], "none", 4))
1119 			val[i] = none;
1120 		else if (nofifo && !strncmp(str[i], "nofifo", 4))
1121 			val[i] = nofifo;
1122 		else {
1123 			char *ep;
1124 			unsigned long r = simple_strtoul(str[i], &ep, 0);
1125 			if (ep != str[i])
1126 				val[i] = r;
1127 			else {
1128 				printk(KERN_ERR "parport: bad specifier `%s'\n", str[i]);
1129 				return -1;
1130 			}
1131 		}
1132 	}
1133 
1134 	return 0;
1135 }
1136 
parport_parse_irqs(int nports,const char * irqstr[],int irqval[])1137 int parport_parse_irqs(int nports, const char *irqstr[], int irqval[])
1138 {
1139 	return parport_parse_params (nports, irqstr, irqval, PARPORT_IRQ_AUTO,
1140 				     PARPORT_IRQ_NONE, 0);
1141 }
1142 
parport_parse_dmas(int nports,const char * dmastr[],int dmaval[])1143 int parport_parse_dmas(int nports, const char *dmastr[], int dmaval[])
1144 {
1145 	return parport_parse_params (nports, dmastr, dmaval, PARPORT_DMA_AUTO,
1146 				     PARPORT_DMA_NONE, PARPORT_DMA_NOFIFO);
1147 }
1148 MODULE_LICENSE("GPL");
1149