/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/uaccess.h>

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
	{ [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};

static void register_irq_proc (unsigned int irq);

/*
 * Special irq handlers.
 */

void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }

/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
	/*
	 * 'what should we do if we get a hw irq event on an illegal vector'.
	 * each architecture has to answer this themselves, it doesn't deserve
	 * a generic callback i think.
	 */
	printk("unexpected interrupt %d\n", irq);
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none

struct hw_interrupt_type no_irq_type = {
	"none",			/* typename */
	startup_none,		/* startup */
	shutdown_none,		/* shutdown */
	enable_none,		/* enable */
	disable_none,		/* disable */
	ack_none,		/* ack */
	end_none		/* end */
};

atomic_t irq_err_count;

/*
 * Generic, controller-independent functions:
 */

int get_irq_list(char *buf)
{
	int i, j;
	struct irqaction * action;
	char *p = buf;

	p += sprintf(p, "           ");
	for (j=0; j<smp_num_cpus; j++)
		p += sprintf(p, "CPU%d       ",j);
	*p++ = '\n';

	for (i = 0 ; i < NR_IRQS ; i++) {
		action = irq_desc[i].action;
		if (!action)
			continue;
		p += sprintf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		p += sprintf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < smp_num_cpus; j++)
			p += sprintf(p, "%10u ",
				kstat.irqs[cpu_logical_map(j)][i]);
#endif
		p += sprintf(p, " %14s", irq_desc[i].handler->typename);
		p += sprintf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			p += sprintf(p, ", %s", action->name);
		*p++ = '\n';
	}
	p += sprintf(p, "\n");
	p += sprintf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	return p - buf;
}
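
/*
 * The resulting /proc/interrupts output looks roughly like this sketch
 * (counts, action names and controller typenames are illustrative only
 * and depend entirely on the machine):
 *
 *	           CPU0
 *	  0:    1934516           none  timer
 *	  2:     156742           none  eth0
 *
 *	ERR:          0
 */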

#ifdef CONFIG_SMP
int global_irq_holder = NO_PROC_ID;
spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;

/*
 * Most of this code was taken from the mips64 tree (ip27-irq.c).  It's
 * virtually identical to the i386 implementation in arch/i386/irq.c, with
 * translations for the interrupt enable bit.
 */

#define MAXCOUNT 		100000000
#define SYNC_OTHER_CORES(x)	udelay(x+1)

static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!irqs_running())
			if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
				break;

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		spin_unlock(&global_irq_lock);

		for (;;) {
			if (!--count) {
				printk("Count spun out.  Huh?\n");
				count = ~0;
			}
			__sti();
			SYNC_OTHER_CORES(cpu);
			__cli();
			if (irqs_running())
				continue;
			if (spin_is_locked(&global_irq_lock))
				continue;
			if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
				continue;
			if (spin_trylock(&global_irq_lock))
				break;
		}
	}
}

/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
	if (irqs_running()) {
		/* Stupid approach */
		cli();
		sti();
	}
}
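
/*
 * Typical driver shutdown pattern, sketched (not code from this file):
 *
 *	... tell the hardware to stop generating interrupts ...
 *	synchronize_irq();
 *	... no handler instance is still running on any CPU now, so
 *	    per-device data the handler touches may safely be torn down ...
 */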

static inline void get_irqlock(int cpu)
{
	if (!spin_trylock(&global_irq_lock)) {
		/* do we already hold the lock? */
		if ((unsigned char) cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		spin_lock(&global_irq_lock);
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	/*
	 * Ok, finally..
	 */
	global_irq_holder = cpu;
}

/*
 * A global "cli()" while in an interrupt context turns into just a local
 * cli(). Interrupts should use spinlocks for the (very unlikely) case that
 * they ever want to protect against each other.
 *
 * If we already have local interrupts disabled, this will not turn a local
 * disable into a global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */

void __global_cli(void)
{
	unsigned long flags;

	__save_flags(flags);
	if (flags & ST0_IE) {
		int cpu = smp_processor_id();
		__cli();
		if (!local_irq_count(cpu))
			get_irqlock(cpu);
	}
}

void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}

/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;
	int cpu = smp_processor_id();

	__save_flags(flags);
	local_enabled = (flags & ST0_IE);
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!local_irq_count(cpu)) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == cpu)
			retval = 0;
	}

	return retval;
}
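
/*
 * Pairing sketch: __global_save_flags() encodes the state as one of the
 * four values in the table above, and __global_restore_flags() (below)
 * decodes it again:
 *
 *	unsigned long flags;
 *
 *	flags = __global_save_flags();
 *	__global_cli();
 *	... critical section ...
 *	__global_restore_flags(flags);
 */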

void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
		case 0:
			__global_cli();
			break;
		case 1:
			__global_sti();
			break;
		case 2:
			__cli();
			break;
		case 3:
			__sti();
			break;
		default:
			printk("global_restore_flags: %08lx\n", flags);
	}
}
#endif /* CONFIG_SMP */

/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
	int status;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);

	status = 1;	/* Force the "do bottom halves" bit */

	if (!(action->flags & SA_INTERRUPT))
		__sti();

	do {
		status |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	__cli();

	irq_exit(cpu, irq);

	return status;
}

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables and enables are
 *	nested. Unlike disable_irq(), this function does not ensure
 *	existing instances of the IRQ handler have completed before
 *	returning.
 *
 *	This function may be called from IRQ context.
 */

inline void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables and enables are
 *	nested: for two disables you need two enables. This function
 *	waits for any pending IRQ handlers for this interrupt to
 *	complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */

void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);

	if (!local_irq_count(smp_processor_id())) {
		do {
			barrier();
		} while (irq_desc[irq].status & IRQ_INPROGRESS);
	}
}
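
/*
 * Nesting sketch: the depth counter in the irq descriptor makes the
 * calls balance out (values shown are desc->depth after each call):
 *
 *	disable_irq(irq);	depth 0 -> 1, line masked
 *	disable_irq(irq);	depth 1 -> 2, still masked
 *	enable_irq(irq);	depth 2 -> 1, still masked
 *	enable_irq(irq);	depth 1 -> 0, line unmasked
 */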

/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line
 *	providing no disable_irq calls are now in effect.
 *
 *	This function may be called from IRQ context.
 */

void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced from %p\n", irq,
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	int cpu = smp_processor_id();
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction * action;
	unsigned int status;

	kstat.irqs[cpu][irq]++;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier.
	 * WAITING is used by probe to mark irqs that are being tested.
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (!action)
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
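	/*
	 * A sketch of the PENDING/INPROGRESS handshake on SMP:
	 *
	 *	CPU0: takes the irq, sets INPROGRESS, drops the lock,
	 *	      runs the handler
	 *	CPU1: same irq arrives, sees INPROGRESS, leaves PENDING
	 *	      set and bails out via out:
	 *	CPU0: retakes the lock, sees PENDING, clears it and loops
	 *	      to run the handler once more on behalf of CPU1
	 */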
	for (;;) {
		spin_unlock(&desc->lock);
		handle_IRQ_event(irq, regs, action);
		spin_lock(&desc->lock);

		if (!(desc->status & IRQ_PENDING))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	if (softirq_pending(cpu))
		do_softirq();
	return 1;
}

/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non-NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *
 *	SA_INTERRUPT		Disable local interrupts while processing
 *
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 *
 */

int request_irq(unsigned int irq,
		void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

#if 1
	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
	}
#endif

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}
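
/*
 * Typical driver usage, sketched (my_interrupt and my_dev are
 * hypothetical names, not part of this file):
 *
 *	static void my_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct my_dev *dev = dev_id;
 *		... acknowledge and service the board ...
 *	}
 *
 *	if (request_irq(dev->irq, my_interrupt, SA_SHIRQ, "mydev", dev))
 *		goto fail;
 *	...
 *	free_irq(dev->irq, dev);	pass the same dev_id back
 */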

/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function may be called from interrupt context.
 *
 *	Bugs: Attempting to free an irq in a handler for the same irq hangs
 *	      the machine.
 */

void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

#ifdef CONFIG_SMP
			/* Wait to make sure it's not being used on another CPU */
			while (desc->status & IRQ_INPROGRESS)
				barrier();
#endif
			kfree(action);
			return;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		return;
	}
}

/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */

static DECLARE_MUTEX(probe_sem);

/**
 *	probe_irq_on	- begin an interrupt autodetect
 *
 *	Commence probing for an interrupt. The interrupts are scanned
 *	and a mask of potential interrupt lines is returned.
 *
 */

unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--)  {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!irq_desc[i].action)
			irq_desc[i].handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ synchronize_irq();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ synchronize_irq();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 *	probe_irq_mask - scan a bitmap of interrupt lines
 *	@val:	mask of interrupts to consider
 *
 *	Scan the ISA bus interrupt lines and return a bitmap of
 *	active interrupts. The interrupt probe logic state is then
 *	returned to its previous value.
 *
 *	Note: we need to scan all the irq's even though we will
 *	only return ISA irq numbers - just so that we reset them
 *	all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask;

	mask = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (i < 16 && !(status & IRQ_WAITING))
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	return mask & val;
}

/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 *	probe_irq_off	- end an interrupt autodetect
 *	@val: mask of potential interrupts (unused)
 *
 *	Scans the unused interrupt lines and returns the line which
 *	appears to have triggered the interrupt. If no interrupt was
 *	found then zero is returned. If more than one interrupt is
 *	found then minus the first candidate is returned to indicate
 *	there is doubt.
 *
 *	The interrupt probe logic state is returned to its previous
 *	value.
 *
 *	BUGS: When used in a module (which arguably shouldn't happen)
 *	nothing prevents two IRQ probe callers from overlapping. The
 *	results of this are non-optimal.
 */

int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}
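
/*
 * The classic probe sequence, sketched (the device poke in the middle
 * is of course hardware specific):
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	mask = probe_irq_on();
 *	... cause the device to generate exactly one interrupt ...
 *	irq = probe_irq_off(mask);	> 0: found, 0: none, < 0: ambiguous
 */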

/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_desc + irq;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded without actually
		 * installing a new handler, but that is hardly a problem,
		 * since only the sysadmin can do it.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	register_irq_proc(irq);
	return 0;
}

void __init init_generic_irq(void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++) {
		irq_desc[i].status  = IRQ_DISABLED;
		irq_desc[i].action  = NULL;
		irq_desc[i].depth   = 1;
		irq_desc[i].handler = &no_irq_type;
	}
}

EXPORT_SYMBOL(disable_irq_nosync);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(probe_irq_mask);

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];

#define HEX_DIGITS 8

static int parse_hex_value (const char *buffer,
		unsigned long count, unsigned long *ret)
{
	unsigned char hexnum [HEX_DIGITS];
	unsigned long value;
	int i;

	if (!count)
		return -EINVAL;
	if (count > HEX_DIGITS)
		count = HEX_DIGITS;
	if (copy_from_user(hexnum, buffer, count))
		return -EFAULT;

	/*
	 * Parse the first HEX_DIGITS characters as a hex string, any non-hex
	 * char is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
	 */
	value = 0;

	for (i = 0; i < count; i++) {
		unsigned int c = hexnum[i];

		switch (c) {
			case '0' ... '9': c -= '0'; break;
			case 'a' ... 'f': c -= 'a'-10; break;
			case 'A' ... 'F': c -= 'A'-10; break;
		default:
			goto out;
		}
		value = (value << 4) | c;
	}
out:
	*ret = value;
	return 0;
}
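
/*
 * For example, parse_hex_value("e1", 2, &v) and parse_hex_value("00E1", 4, &v)
 * both set v to 0xe1 and return 0; a leading non-hex character leaves v at 0.
 */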

#ifdef CONFIG_SMP

static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };

static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
}

static int irq_affinity_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	unsigned long new_value;

	if (!irq_desc[irq].handler->set_affinity)
		return -EIO;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!(new_value & cpu_online_map))
		return -EINVAL;

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq, new_value);

	return full_count;
}

#endif

static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	unsigned long *mask = (unsigned long *) data;

	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf (page, "%08lx\n", *mask);
}

static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	unsigned long *mask = (unsigned long *) data;
	int full_count = count, err;
	unsigned long new_value;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
			irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	{
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}

		smp_affinity_entry[irq] = entry;
	}
#endif
}

unsigned long prof_cpu_mask = -1;

void init_irq_proc (void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

	if (!entry)
		return;

	entry->nlink = 1;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++)
		register_irq_proc(i);
}