1 /*
2  *	linux/arch/ia64/kernel/irq.c
3  *
4  *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
5  *
6  * This file contains the code used by various IRQ handling routines:
7  * asking for different IRQ's should be done through these routines
8  * instead of just grabbing them. Thus setups with different IRQ numbers
9  * shouldn't result in any weird surprises, and installing new handlers
10  * should be easier.
11  */
12 
13 /*
14  * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
15  *
16  * IRQs are in fact implemented a bit like signal handlers for the kernel.
17  * Naturally it's not a 1:1 relation, but there are similarities.
18  */
19 
20 #include <linux/config.h>
21 #include <linux/ptrace.h>
22 #include <linux/errno.h>
23 #include <linux/signal.h>
24 #include <linux/sched.h>
25 #include <linux/ioport.h>
26 #include <linux/interrupt.h>
27 #include <linux/timex.h>
28 #include <linux/slab.h>
29 #include <linux/random.h>
30 #include <linux/smp_lock.h>
31 #include <linux/init.h>
32 #include <linux/kernel_stat.h>
33 #include <linux/irq.h>
34 #include <linux/proc_fs.h>
35 
36 #include <asm/atomic.h>
37 #include <asm/io.h>
38 #include <asm/smp.h>
39 #include <asm/system.h>
40 #include <asm/bitops.h>
41 #include <asm/uaccess.h>
42 #include <asm/pgalloc.h>
43 #include <asm/delay.h>
44 #include <asm/irq.h>
45 
46 
47 
48 /*
49  * Linux has a controller-independent x86 interrupt architecture.
50  * Every controller has a 'controller-template' that is used
51  * by the main code to do the right thing. Each driver-visible
52  * interrupt source is transparently wired to the appropriate
53  * controller. Thus drivers need not be aware of the
54  * interrupt-controller.
55  *
56  * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
57  * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
58  * (IO-APICs assumed to be messaging to Pentium local-APICs)
59  *
60  * the code is designed to be easily extended with new/different
61  * interrupt controllers, without having to do assembly magic.
62  */
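/*
 * Purely illustrative sketch (not part of this file): a hypothetical
 * controller driver supplies its own 'controller-template' and hooks it
 * into the descriptors it serves.  All my_pic_* names below are invented;
 * the field order follows struct hw_interrupt_type as used for
 * no_irq_type further down.
 *
 *	static struct hw_interrupt_type my_pic_type = {
 *		"MY-PIC",		// typename
 *		my_pic_startup,		// startup
 *		my_pic_shutdown,	// shutdown
 *		my_pic_enable,		// enable
 *		my_pic_disable,		// disable
 *		my_pic_ack,		// ack
 *		my_pic_end		// end
 *	};
 *
 *	// during controller init, for every vector this PIC owns:
 *	irq_desc(vector)->handler = &my_pic_type;
 */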
63 
64 /*
65  * Controller mappings for all interrupt sources:
66  */
67 irq_desc_t _irq_desc[NR_IRQS] __cacheline_aligned =
68 	{ [0 ... NR_IRQS-1] = { IRQ_DISABLED, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
69 
70 #ifdef CONFIG_IA64_GENERIC
71 struct irq_desc *
72 __ia64_irq_desc (unsigned int irq)
73 {
74 	return _irq_desc + irq;
75 }
76 
77 ia64_vector
78 __ia64_irq_to_vector (unsigned int irq)
79 {
80 	return (ia64_vector) irq;
81 }
82 
83 unsigned int
84 __ia64_local_vector_to_irq (ia64_vector vec)
85 {
86 	return (unsigned int) vec;
87 }
88 
89 #endif
90 
91 static void register_irq_proc (unsigned int irq);
92 
93 /*
94  * Special irq handlers.
95  */
96 
97 void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
98 
99 /*
100  * Generic no controller code
101  */
102 
103 static void enable_none(unsigned int irq) { }
104 static unsigned int startup_none(unsigned int irq) { return 0; }
105 static void disable_none(unsigned int irq) { }
106 static void ack_none(unsigned int irq)
107 {
108 /*
109  * 'What should we do if we get a hw irq event on an illegal vector?'
110  * Each architecture has to answer this itself; it doesn't deserve
111  * a generic callback, I think.
112  */
113 #if CONFIG_X86
114 	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
115 #ifdef CONFIG_X86_LOCAL_APIC
116 	/*
117 	 * Currently unexpected vectors happen only on SMP and APIC.
118 	 * We _must_ ack these because every local APIC has only N
119 	 * irq slots per priority level, and a 'hanging, unacked' IRQ
120 	 * holds up an irq slot - in excessive cases (when multiple
121 	 * unexpected vectors occur) that might lock up the APIC
122 	 * completely.
123 	 */
124 	ack_APIC_irq();
125 #endif
126 #endif
127 #if CONFIG_IA64
128 	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
129 #endif
130 }
131 
132 /* startup is the same as "enable", shutdown is same as "disable" */
133 #define shutdown_none	disable_none
134 #define end_none	enable_none
135 
136 struct hw_interrupt_type no_irq_type = {
137 	"none",
138 	startup_none,
139 	shutdown_none,
140 	enable_none,
141 	disable_none,
142 	ack_none,
143 	end_none
144 };
145 
146 atomic_t irq_err_count;
147 #if defined(CONFIG_X86) && defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
148 atomic_t irq_mis_count;
149 #endif
150 
151 /*
152  * Generic, controller-independent functions:
153  */
154 
155 int get_irq_list(char *buf)
156 {
157 	int i, j;
158 	struct irqaction * action;
159 	irq_desc_t *idesc;
160 	char *p = buf;
161 
162 	p += sprintf(p, "           ");
163 	for (j=0; j<smp_num_cpus; j++)
164 		p += sprintf(p, "CPU%d       ",j);
165 	*p++ = '\n';
166 
167 	for (i = 0 ; i < NR_IRQS ; i++) {
168 		idesc = irq_desc(i);
169 		action = idesc->action;
170 		if (!action)
171 			continue;
172 		p += sprintf(p, "%3d: ",i);
173 #ifndef CONFIG_SMP
174 		p += sprintf(p, "%10u ", kstat_irqs(i));
175 #else
176 		for (j = 0; j < smp_num_cpus; j++)
177 			p += sprintf(p, "%10u ",
178 				kstat.irqs[cpu_logical_map(j)][i]);
179 #endif
180 		p += sprintf(p, " %14s", idesc->handler->typename);
181 		p += sprintf(p, "  %s", action->name);
182 
183 		for (action=action->next; action; action = action->next)
184 			p += sprintf(p, ", %s", action->name);
185 		*p++ = '\n';
186 	}
187 	p += sprintf(p, "NMI: ");
188 	for (j = 0; j < smp_num_cpus; j++)
189 		p += sprintf(p, "%10u ",
190 			nmi_count(cpu_logical_map(j)));
191 	p += sprintf(p, "\n");
192 #if defined(CONFIG_SMP) && defined(CONFIG_X86)
193 	p += sprintf(p, "LOC: ");
194 	for (j = 0; j < smp_num_cpus; j++)
195 		p += sprintf(p, "%10u ",
196 			apic_timer_irqs[cpu_logical_map(j)]);
197 	p += sprintf(p, "\n");
198 #endif
199 	p += sprintf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
200 #if defined(CONFIG_X86) && defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
201 	p += sprintf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
202 #endif
203 	return p - buf;
204 }
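/*
 * For illustration only: the text built above is what shows up in
 * /proc/interrupts.  On a two-CPU ia64 box it looks roughly like the
 * following (counts and device names are made up):
 *
 *	           CPU0       CPU1
 *	 39:      12345        678  IO-SAPIC-level  eth0
 *	 49:        903          0   IO-SAPIC-edge  ide0
 *	231:     991234     990876          LSAPIC  timer
 *	NMI:          0          0
 *	ERR:          0
 */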
205 
206 
207 /*
208  * Global interrupt locks for SMP. Allow interrupts to come in on any
209  * CPU, yet make cli/sti act globally to protect critical regions..
210  */
211 
212 #ifdef CONFIG_SMP
213 unsigned int global_irq_holder = NO_PROC_ID;
214 unsigned volatile long global_irq_lock; /* pedantic: long for set_bit --RR */
215 
216 extern void show_stack(unsigned long* esp);
217 
218 static void show(char * str)
219 {
220 	int i;
221 	int cpu = smp_processor_id();
222 
223 	printk("\n%s, CPU %d:\n", str, cpu);
224 	printk("irq:  %d [",irqs_running());
225 	for(i=0;i < smp_num_cpus;i++)
226 		printk(" %d",irq_count(i));
227 	printk(" ]\nbh:   %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
228 	for(i=0;i < smp_num_cpus;i++)
229 		printk(" %d",bh_count(i));
230 
231 	printk(" ]\nStack dumps:");
232 #if defined(CONFIG_IA64)
233 	/*
234 	 * We can't unwind the stack of another CPU without access to
235 	 * the registers of that CPU.  And sending an IPI when we're
236 	 * in a potentially wedged state doesn't sound like a smart
237 	 * idea.
238 	 */
239 #elif defined(CONFIG_X86)
240 	for(i=0;i< smp_num_cpus;i++) {
241 		unsigned long esp;
242 		if(i==cpu)
243 			continue;
244 		printk("\nCPU %d:",i);
245 		esp = init_tss[i].esp0;
246 		if(esp==NULL) {
247 			/* tss->esp0 is set to NULL in cpu_init(),
248 			 * it's initialized when the cpu returns to user
249 			 * space. -- manfreds
250 			 */
251 			printk(" <unknown> ");
252 			continue;
253 		}
254 		esp &= ~(THREAD_SIZE-1);
255 		esp += sizeof(struct task_struct);
256 		show_stack((void*)esp);
257 	}
258 #else
259 	You lose...
260 #endif
261 	printk("\nCPU %d:",cpu);
262 	show_stack(NULL);
263 	printk("\n");
264 }
265 
266 #define MAXCOUNT 100000000
267 
268 /*
269  * I had a lockup scenario where a tight loop doing
270  * spin_unlock()/spin_lock() on CPU#1 was racing with
271  * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
272  * apparently the spin_unlock() information did not make it
273  * through to CPU#0 ... nasty, is this by design, do we have to limit
274  * 'memory update oscillation frequency' artificially like here?
275  *
276  * Such 'high frequency update' races can be avoided by careful design, but
277  * some of our major constructs like spinlocks use similar techniques,
278  * it would be nice to clarify this issue. Set this define to 0 if you
279  * want to check whether your system freezes.  I suspect the delay done
280  * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
281  * i thought that such things are guaranteed by design, since we use
282  * the 'LOCK' prefix.
283  */
284 #define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0
285 
286 #if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
287 # define SYNC_OTHER_CORES(x) udelay(x+1)
288 #else
289 /*
290  * We have to allow irqs to arrive between __sti and __cli
291  */
292 # ifdef CONFIG_IA64
293 #  define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop 0")
294 # else
295 #  define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
296 # endif
297 #endif
298 
299 static inline void wait_on_irq(void)
300 {
301 	int count = MAXCOUNT;
302 
303 	for (;;) {
304 
305 		/*
306 		 * Wait until all interrupts are gone. Wait
307 		 * for bottom half handlers unless we're
308 		 * already executing in one..
309 		 */
310 		if (!irqs_running())
311 			if (really_local_bh_count() || !spin_is_locked(&global_bh_lock))
312 				break;
313 
314 		/* Duh, we have to loop. Release the lock to avoid deadlocks */
315 		smp_mb__before_clear_bit();	/* need barrier before releasing lock... */
316 		clear_bit(0,&global_irq_lock);
317 
318 		for (;;) {
319 			if (!--count) {
320 				show("wait_on_irq");
321 				count = ~0;
322 			}
323 			__sti();
324 			SYNC_OTHER_CORES(smp_processor_id());
325 			__cli();
326 			if (irqs_running())
327 				continue;
328 			if (global_irq_lock)
329 				continue;
330 			if (!really_local_bh_count() && spin_is_locked(&global_bh_lock))
331 				continue;
332 			if (!test_and_set_bit(0,&global_irq_lock))
333 				break;
334 		}
335 	}
336 }
337 
338 /*
339  * This is called when we want to synchronize with
340  * interrupts. We may for example tell a device to
341  * stop sending interrupts: but to make sure there
342  * are no interrupts that are executing on another
343  * CPU we need to call this function.
344  */
345 void synchronize_irq(void)
346 {
347 	if (irqs_running()) {
348 		/* Stupid approach */
349 		cli();
350 		sti();
351 	}
352 }
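/*
 * Typical (hypothetical) use from a driver's teardown path, assuming a
 * device 'dev' with a register that masks further interrupt generation
 * (MY_DEV_IRQ_MASK and the dev layout are invented for illustration):
 *
 *	writel(0, dev->regs + MY_DEV_IRQ_MASK);	// stop new interrupts
 *	synchronize_irq();			// wait out in-flight handlers
 *	free_irq(dev->irq, dev);		// now safe to tear down state
 */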
353 
354 static inline void get_irqlock(void)
355 {
356 	if (test_and_set_bit(0,&global_irq_lock)) {
357 		/* do we already hold the lock? */
358 		if (smp_processor_id() == global_irq_holder)
359 			return;
360 		/* Uhhuh.. Somebody else got it. Wait.. */
361 		do {
362 			do {
363 #ifdef CONFIG_X86
364 				rep_nop();
365 #endif
366 			} while (test_bit(0,&global_irq_lock));
367 		} while (test_and_set_bit(0,&global_irq_lock));
368 	}
369 	/*
370 	 * We also need to make sure that nobody else is running
371 	 * in an interrupt context.
372 	 */
373 	wait_on_irq();
374 
375 	/*
376 	 * Ok, finally..
377 	 */
378 	global_irq_holder = smp_processor_id();
379 }
380 
381 #define EFLAGS_IF_SHIFT 9
382 
383 /*
384  * A global "cli()" while in an interrupt context
385  * turns into just a local cli(). Interrupts
386  * should use spinlocks for the (very unlikely)
387  * case that they ever want to protect against
388  * each other.
389  *
390  * If we already have local interrupts disabled,
391  * this will not turn a local disable into a
392  * global one (problems with spinlocks: this makes
393  * save_flags+cli+sti usable inside a spinlock).
394  */
395 void __global_cli(void)
396 {
397 	unsigned int flags;
398 
399 #ifdef CONFIG_IA64
400 	__save_flags(flags);
401 	if (flags & IA64_PSR_I) {
402 		__cli();
403 		if (!really_local_irq_count())
404 			get_irqlock();
405 	}
406 #else
407 	__save_flags(flags);
408 	if (flags & (1 << EFLAGS_IF_SHIFT)) {
409 		__cli();
410 		if (!really_local_irq_count())
411 			get_irqlock();
412 	}
413 #endif
414 }
415 
416 void __global_sti(void)
417 {
418 	if (!really_local_irq_count())
419 		release_irqlock(smp_processor_id());
420 	__sti();
421 }
422 
423 /*
424  * SMP flags value to restore to:
425  * 0 - global cli
426  * 1 - global sti
427  * 2 - local cli
428  * 3 - local sti
429  */
430 unsigned long __global_save_flags(void)
431 {
432 	int retval;
433 	int local_enabled;
434 	unsigned long flags;
435 	int cpu = smp_processor_id();
436 
437 	__save_flags(flags);
438 #ifdef CONFIG_IA64
439 	local_enabled = (flags & IA64_PSR_I) != 0;
440 #else
441 	local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
442 #endif
443 	/* default to local */
444 	retval = 2 + local_enabled;
445 
446 	/* check for global flags if we're not in an interrupt */
447 	if (!really_local_irq_count()) {
448 		if (local_enabled)
449 			retval = 1;
450 		if (global_irq_holder == cpu)
451 			retval = 0;
452 	}
453 	return retval;
454 }
455 
456 void __global_restore_flags(unsigned long flags)
457 {
458 	switch (flags) {
459 	case 0:
460 		__global_cli();
461 		break;
462 	case 1:
463 		__global_sti();
464 		break;
465 	case 2:
466 		__cli();
467 		break;
468 	case 3:
469 		__sti();
470 		break;
471 	default:
472 		printk("global_restore_flags: %08lx (%08lx)\n",
473 			flags, (&flags)[-1]);
474 	}
475 }
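/*
 * Sketch of how the 0..3 encoding documented above is consumed.  On SMP
 * the familiar save_flags()/cli()/restore_flags() macros expand to these
 * __global_* routines (see the asm/system.h headers), so a caller simply
 * does:
 *
 *	unsigned long flags;
 *
 *	save_flags(flags);	// yields 0..3 per the table above
 *	cli();			// global cli: may take the irq lock
 *	... touch data shared with interrupt handlers ...
 *	restore_flags(flags);	// undoes exactly what cli() did
 *
 * This only illustrates the calling convention; spinlocks are the
 * preferred primitive for new code.
 */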
476 
477 #endif
478 
479 /*
480  * This should really return information about whether
481  * we should do bottom half handling etc. Right now we
482  * end up _always_ checking the bottom half, which is a
483  * waste of time and is not what some drivers would
484  * prefer.
485  */
486 int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
487 {
488 	int status;
489 
490 	local_irq_enter(irq);
491 
492 	status = 1;	/* Force the "do bottom halves" bit */
493 
494 	if (!(action->flags & SA_INTERRUPT))
495 		__sti();
496 
497 	do {
498 		status |= action->flags;
499 		action->handler(irq, action->dev_id, regs);
500 		action = action->next;
501 	} while (action);
502 	if (status & SA_SAMPLE_RANDOM)
503 		add_interrupt_randomness(irq);
504 	__cli();
505 
506 	local_irq_exit(irq);
507 
508 	return status;
509 }
510 
511 /**
512  *	disable_irq_nosync - disable an irq without waiting
513  *	@irq: Interrupt to disable
514  *
515  *	Disable the selected interrupt line.  Disables and Enables are
516  *	nested.
517  *	Unlike disable_irq(), this function does not ensure existing
518  *	instances of the IRQ handler have completed before returning.
519  *
520  *	This function may be called from IRQ context.
521  */
522 
523 inline void disable_irq_nosync(unsigned int irq)
524 {
525 	irq_desc_t *desc = irq_desc(irq);
526 	unsigned long flags;
527 
528 	spin_lock_irqsave(&desc->lock, flags);
529 	if (!desc->depth++) {
530 		desc->status |= IRQ_DISABLED;
531 		desc->handler->disable(irq);
532 	}
533 	spin_unlock_irqrestore(&desc->lock, flags);
534 }
535 
536 /**
537  *	disable_irq - disable an irq and wait for completion
538  *	@irq: Interrupt to disable
539  *
540  *	Disable the selected interrupt line.  Enables and Disables are
541  *	nested.
542  *	This function waits for any pending IRQ handlers for this interrupt
543  *	to complete before returning. If you use this function while
544  *	holding a resource the IRQ handler may need you will deadlock.
545  *
546  *	This function may be called - with care - from IRQ context.
547  */
548 
549 void disable_irq(unsigned int irq)
550 {
551 	disable_irq_nosync(irq);
552 
553 #ifdef CONFIG_SMP
554 	if (!really_local_irq_count()) {
555 		do {
556 			barrier();
557 		} while (irq_desc(irq)->status & IRQ_INPROGRESS);
558 	}
559 #endif
560 }
561 
562 /**
563  *	enable_irq - enable handling of an irq
564  *	@irq: Interrupt to enable
565  *
566  *	Undoes the effect of one call to disable_irq().  If this
567  *	matches the last disable, processing of interrupts on this
568  *	IRQ line is re-enabled.
569  *
570  *	This function may be called from IRQ context.
571  */
572 
573 void enable_irq(unsigned int irq)
574 {
575 	irq_desc_t *desc = irq_desc(irq);
576 	unsigned long flags;
577 
578 	spin_lock_irqsave(&desc->lock, flags);
579 	switch (desc->depth) {
580 	case 1: {
581 		unsigned int status = desc->status & ~IRQ_DISABLED;
582 		desc->status = status;
583 		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
584 			desc->status = status | IRQ_REPLAY;
585 			hw_resend_irq(desc->handler,irq);
586 		}
587 		desc->handler->enable(irq);
588 		/* fall-through */
589 	}
590 	default:
591 		desc->depth--;
592 		break;
593 	case 0:
594 		printk(KERN_ERR "enable_irq(%u) unbalanced from %p\n",
595 		       irq, (void *) __builtin_return_address(0));
596 	}
597 	spin_unlock_irqrestore(&desc->lock, flags);
598 }
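/*
 * Disables nest, so a (hypothetical) driver can bracket a slow
 * reconfiguration like this without disturbing other callers:
 *
 *	disable_irq(dev->irq);		// also waits for running handlers
 *	... reprogram the device ...
 *	enable_irq(dev->irq);		// line comes back when depth hits 0
 *
 * Calling enable_irq() more often than disable_irq() trips the
 * "unbalanced" warning above.
 */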
599 
600 /*
601  * do_IRQ handles all normal device IRQ's (the special
602  * SMP cross-CPU interrupts have their own specific
603  * handlers).
604  */
605 unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
606 {
607 	/*
608 	 * We ack quickly, we don't want the irq controller
609 	 * thinking we're snobs just because some other CPU has
610 	 * disabled global interrupts (we have already done the
611 	 * INT_ACK cycles, it's too late to try to pretend to the
612 	 * controller that we aren't taking the interrupt).
613 	 *
614 	 * 0 return value means that this irq is already being
615 	 * handled by some other CPU. (or is disabled)
616 	 */
617 	int cpu = smp_processor_id();
618 	irq_desc_t *desc = irq_desc(irq);
619 	struct irqaction * action;
620 	unsigned int status;
621 
622 	kstat.irqs[cpu][irq]++;
623 
624 	if (desc->status & IRQ_PER_CPU) {
625 		/* no locking required for CPU-local interrupts: */
626 		desc->handler->ack(irq);
627 		handle_IRQ_event(irq, regs, desc->action);
628 		desc->handler->end(irq);
629 	} else {
630 		spin_lock(&desc->lock);
631 		desc->handler->ack(irq);
632 		/*
633 		 * REPLAY is when Linux resends an IRQ that was dropped earlier
634 		 * WAITING is used by probe to mark irqs that are being tested
635 		 */
636 		status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
637 		status |= IRQ_PENDING; /* we _want_ to handle it */
638 
639 		/*
640 		 * If the IRQ is disabled for whatever reason, we cannot
641 		 * use the action we have.
642 		 */
643 		action = NULL;
644 		if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
645 			action = desc->action;
646 			status &= ~IRQ_PENDING; /* we commit to handling */
647 			status |= IRQ_INPROGRESS; /* we are handling it */
648 		}
649 		desc->status = status;
650 
651 		/*
652 		 * If there is no IRQ handler or it was disabled, exit early.
653 		 * Since we set PENDING, if another processor is handling
654 		 * a different instance of this same irq, the other processor
655 		 * will take care of it.
656 		 */
657 		if (!action)
658 			goto out;
659 
660 		/*
661 		 * Edge triggered interrupts need to remember
662 		 * pending events.
663 		 * This applies to any hw interrupts that allow a second
664 		 * instance of the same irq to arrive while we are in do_IRQ
665 		 * or in the handler. But the code here only handles the _second_
666 		 * instance of the irq, not the third or fourth. So it is mostly
667 		 * useful for irq hardware that does not mask cleanly in an
668 		 * SMP environment.
669 		 */
670 		for (;;) {
671 			spin_unlock(&desc->lock);
672 			handle_IRQ_event(irq, regs, action);
673 			spin_lock(&desc->lock);
674 
675 			if (!(desc->status & IRQ_PENDING))
676 				break;
677 			desc->status &= ~IRQ_PENDING;
678 		}
679 		desc->status &= ~IRQ_INPROGRESS;
680 	  out:
681 		/*
682 		 * The ->end() handler has to deal with interrupts which got
683 		 * disabled while the handler was running.
684 		 */
685 		desc->handler->end(irq);
686 		spin_unlock(&desc->lock);
687 	}
688 	return 1;
689 }
690 
691 /**
692  *	request_irq - allocate an interrupt line
693  *	@irq: Interrupt line to allocate
694  *	@handler: Function to be called when the IRQ occurs
695  *	@irqflags: Interrupt type flags
696  *	@devname: An ascii name for the claiming device
697  *	@dev_id: A cookie passed back to the handler function
698  *
699  *	This call allocates interrupt resources and enables the
700  *	interrupt line and IRQ handling. From the point this
701  *	call is made your handler function may be invoked. Since
702  *	your handler function must clear any interrupt the board
703  *	raises, you must take care both to initialise your hardware
704  *	and to set up the interrupt handler in the right order.
705  *
706  *	Dev_id must be globally unique. Normally the address of the
707  *	device data structure is used as the cookie. Since the handler
708  *	receives this value it makes sense to use it.
709  *
710  *	If your interrupt is shared you must pass a non NULL dev_id
711  *	as this is required when freeing the interrupt.
712  *
713  *	Flags:
714  *
715  *	SA_SHIRQ		Interrupt is shared
716  *
717  *	SA_INTERRUPT		Disable local interrupts while processing
718  *
719  *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
720  *
721  */
722 
723 int request_irq(unsigned int irq,
724 		void (*handler)(int, void *, struct pt_regs *),
725 		unsigned long irqflags,
726 		const char * devname,
727 		void *dev_id)
728 {
729 	int retval;
730 	struct irqaction * action;
731 
732 #if 1
733 	/*
734 	 * Sanity-check: shared interrupts should REALLY pass in
735 	 * a real dev-ID, otherwise we'll have trouble later trying
736 	 * to figure out which interrupt is which (messes up the
737 	 * interrupt freeing logic etc).
738 	 */
739 	if (irqflags & SA_SHIRQ) {
740 		if (!dev_id)
741 			printk(KERN_ERR "Bad boy: %s called us without a dev_id!\n", devname);
742 	}
743 #endif
744 
745 	if (irq >= NR_IRQS)
746 		return -EINVAL;
747 	if (!handler)
748 		return -EINVAL;
749 
750 	action = (struct irqaction *)
751 			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
752 	if (!action)
753 		return -ENOMEM;
754 
755 	action->handler = handler;
756 	action->flags = irqflags;
757 	action->mask = 0;
758 	action->name = devname;
759 	action->next = NULL;
760 	action->dev_id = dev_id;
761 
762 	retval = setup_irq(irq, action);
763 	if (retval)
764 		kfree(action);
765 	return retval;
766 }
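/*
 * Illustrative only (the device, handler and names are invented): a
 * typical driver init path allocates its interrupt like this, passing a
 * non-NULL dev_id because the line may be shared:
 *
 *	static void my_card_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct my_card *card = dev_id;
 *		// ack the board, then handle the event or mark a bottom half
 *	}
 *
 *	if (request_irq(card->irq, my_card_interrupt,
 *			SA_SHIRQ | SA_SAMPLE_RANDOM, "my_card", card))
 *		printk(KERN_ERR "my_card: IRQ %d is busy\n", card->irq);
 *
 * The matching teardown is free_irq(card->irq, card), below.
 */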
767 
768 /**
769  *	free_irq - free an interrupt
770  *	@irq: Interrupt line to free
771  *	@dev_id: Device identity to free
772  *
773  *	Remove an interrupt handler. The handler is removed and if the
774  *	interrupt line is no longer in use by any driver it is disabled.
775  *	On a shared IRQ the caller must ensure the interrupt is disabled
776  *	on the card it drives before calling this function. The function
777  *	does not return until any executing interrupts for this IRQ
778  *	have completed.
779  *
780  *	This function may be called from interrupt context.
781  *
782  *	Bugs: Attempting to free an irq in a handler for the same irq hangs
783  *	      the machine.
784  */
785 
786 void free_irq(unsigned int irq, void *dev_id)
787 {
788 	irq_desc_t *desc;
789 	struct irqaction **p;
790 	unsigned long flags;
791 
792 	if (irq >= NR_IRQS)
793 		return;
794 
795 	desc = irq_desc(irq);
796 	spin_lock_irqsave(&desc->lock,flags);
797 	p = &desc->action;
798 	for (;;) {
799 		struct irqaction * action = *p;
800 		if (action) {
801 			struct irqaction **pp = p;
802 			p = &action->next;
803 			if (action->dev_id != dev_id)
804 				continue;
805 
806 			/* Found it - now remove it from the list of entries */
807 			*pp = action->next;
808 			if (!desc->action) {
809 				desc->status |= IRQ_DISABLED;
810 				desc->handler->shutdown(irq);
811 			}
812 			spin_unlock_irqrestore(&desc->lock,flags);
813 
814 #ifdef CONFIG_SMP
815 			/* Wait to make sure it's not being used on another CPU */
816 			while (desc->status & IRQ_INPROGRESS)
817 				barrier();
818 #endif
819 			kfree(action);
820 			return;
821 		}
822 		printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
823 		spin_unlock_irqrestore(&desc->lock,flags);
824 		return;
825 	}
826 }
827 
828 /*
829  * IRQ autodetection code..
830  *
831  * This depends on the fact that any interrupt that
832  * comes in on to an unassigned handler will get stuck
833  * with "IRQ_WAITING" cleared and the interrupt
834  * disabled.
835  */
836 
837 static DECLARE_MUTEX(probe_sem);
838 
839 /**
840  *	probe_irq_on	- begin an interrupt autodetect
841  *
842  *	Commence probing for an interrupt. The interrupts are scanned
843  *	and a mask of potential interrupt lines is returned.
844  *
845  */
846 
847 unsigned long probe_irq_on(void)
848 {
849 	unsigned int i;
850 	irq_desc_t *desc;
851 	unsigned long val;
852 	unsigned long delay;
853 
854 	down(&probe_sem);
855 	/*
856 	 * something may have generated an irq long ago and we want to
857 	 * flush such a longstanding irq before considering it as spurious.
858 	 */
859 	for (i = NR_IRQS-1; i > 0; i--)  {
860 		desc = irq_desc(i);
861 
862 		spin_lock_irq(&desc->lock);
863 		if (!desc->action)
864 			desc->handler->startup(i);
865 		spin_unlock_irq(&desc->lock);
866 	}
867 
868 	/* Wait for longstanding interrupts to trigger. */
869 	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
870 		/* about 20ms delay */ synchronize_irq();
871 
872 	/*
873 	 * enable any unassigned irqs
874 	 * (we must startup again here because if a longstanding irq
875 	 * happened in the previous stage, it may have masked itself)
876 	 */
877 	for (i = NR_IRQS-1; i > 0; i--) {
878 		desc = irq_desc(i);
879 
880 		spin_lock_irq(&desc->lock);
881 		if (!desc->action) {
882 			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
883 			if (desc->handler->startup(i))
884 				desc->status |= IRQ_PENDING;
885 		}
886 		spin_unlock_irq(&desc->lock);
887 	}
888 
889 	/*
890 	 * Wait for spurious interrupts to trigger
891 	 */
892 	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
893 		/* about 100ms delay */ synchronize_irq();
894 
895 	/*
896 	 * Now filter out any obviously spurious interrupts
897 	 */
898 	val = 0;
899 	for (i = 0; i < NR_IRQS; i++) {
900 		irq_desc_t *desc = irq_desc(i);
901 		unsigned int status;
902 
903 		spin_lock_irq(&desc->lock);
904 		status = desc->status;
905 
906 		if (status & IRQ_AUTODETECT) {
907 			/* It triggered already - consider it spurious. */
908 			if (!(status & IRQ_WAITING)) {
909 				desc->status = status & ~IRQ_AUTODETECT;
910 				desc->handler->shutdown(i);
911 			} else
912 				if (i < 32)
913 					val |= 1 << i;
914 		}
915 		spin_unlock_irq(&desc->lock);
916 	}
917 
918 	return val;
919 }
920 
921 /**
922  *	probe_irq_mask - scan a bitmap of interrupt lines
923  *	@val:	mask of interrupts to consider
924  *
925  *	Scan the ISA bus interrupt lines and return a bitmap of
926  *	active interrupts. The interrupt probe logic state is then
927  *	returned to its previous value.
928  *
929  *	Note: we need to scan all the irq's even though we will
930  *	only return ISA irq numbers - just so that we reset them
931  *	all to a known state.
932  */
933 
934 unsigned int probe_irq_mask(unsigned long val)
935 {
936 	int i;
937 	unsigned int mask;
938 
939 	mask = 0;
940 	for (i = 0; i < 16; i++) {
941 		irq_desc_t *desc = irq_desc(i);
942 		unsigned int status;
943 
944 		spin_lock_irq(&desc->lock);
945 		status = desc->status;
946 
947 		if (status & IRQ_AUTODETECT) {
948 			if (!(status & IRQ_WAITING))
949 				mask |= 1 << i;
950 
951 			desc->status = status & ~IRQ_AUTODETECT;
952 			desc->handler->shutdown(i);
953 		}
954 		spin_unlock_irq(&desc->lock);
955 	}
956 	up(&probe_sem);
957 
958 	return mask & val;
959 }
960 
961 /**
962  *	probe_irq_off	- end an interrupt autodetect
963  *	@val: mask of potential interrupts (unused)
964  *
965  *	Scans the unused interrupt lines and returns the line which
966  *	appears to have triggered the interrupt. If no interrupt was
967  *	found then zero is returned. If more than one interrupt is
968  *	found then the negative of the first candidate is returned to
969  *	indicate that there is doubt.
970  *
971  *	The interrupt probe logic state is returned to its previous
972  *	value.
973  *
974  *	BUGS: When used in a module (which arguably shouldn't happen),
975  *	nothing prevents two IRQ probe callers from overlapping. The
976  *	results of this are non-optimal.
977  */
978 
979 int probe_irq_off(unsigned long val)
980 {
981 	int i, irq_found, nr_irqs;
982 
983 	nr_irqs = 0;
984 	irq_found = 0;
985 	for (i = 0; i < NR_IRQS; i++) {
986 		irq_desc_t *desc = irq_desc(i);
987 		unsigned int status;
988 
989 		spin_lock_irq(&desc->lock);
990 		status = desc->status;
991 
992 		if (status & IRQ_AUTODETECT) {
993 			if (!(status & IRQ_WAITING)) {
994 				if (!nr_irqs)
995 					irq_found = i;
996 				nr_irqs++;
997 			}
998 			desc->status = status & ~IRQ_AUTODETECT;
999 			desc->handler->shutdown(i);
1000 		}
1001 		spin_unlock_irq(&desc->lock);
1002 	}
1003 	up(&probe_sem);
1004 
1005 	if (nr_irqs > 1)
1006 		irq_found = -irq_found;
1007 	return irq_found;
1008 }
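/*
 * The canonical autoprobe sequence, sketched for a hypothetical ISA-style
 * card that can be told to raise an interrupt on demand (MY_CARD_FIRE_IRQ
 * and card->port are invented):
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	mask = probe_irq_on();			// arm all unassigned lines
 *	outb(MY_CARD_FIRE_IRQ, card->port);	// make the card interrupt
 *	udelay(100);				// give it time to arrive
 *	irq = probe_irq_off(mask);		// 0: none, <0: ambiguous
 *
 * probe_sem is taken in probe_irq_on() and only released by
 * probe_irq_off() or probe_irq_mask(), so the calls must always be paired.
 */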
1009 
1010 int setup_irq(unsigned int irq, struct irqaction * new)
1011 {
1012 	int shared = 0;
1013 	unsigned long flags;
1014 	struct irqaction *old, **p;
1015 	irq_desc_t *desc = irq_desc(irq);
1016 
1017 	/*
1018 	 * Some drivers like serial.c use request_irq() heavily,
1019 	 * so we have to be careful not to interfere with a
1020 	 * running system.
1021 	 */
1022 	if (new->flags & SA_SAMPLE_RANDOM) {
1023 		/*
1024 		 * This function might sleep, we want to call it first,
1025 		 * outside of the atomic block.
1026 		 * Yes, this might clear the entropy pool if the wrong
1027 		 * driver is attempted to be loaded, without actually
1028 		 * installing a new handler, but that is not really a
1029 		 * problem, since only the sysadmin is able to do this.
1030 		 */
1031 		rand_initialize_irq(irq);
1032 	}
1033 
1034 	if (new->flags & SA_PERCPU_IRQ) {
1035 		desc->status |= IRQ_PER_CPU;
1036 		desc->handler = &irq_type_ia64_lsapic;
1037 	}
1038 
1039 	/*
1040 	 * The following block of code has to be executed atomically
1041 	 */
1042 	spin_lock_irqsave(&desc->lock,flags);
1043 	p = &desc->action;
1044 	if ((old = *p) != NULL) {
1045 		/* Can't share interrupts unless both agree to */
1046 		if (!(old->flags & new->flags & SA_SHIRQ)) {
1047 			spin_unlock_irqrestore(&desc->lock,flags);
1048 			return -EBUSY;
1049 		}
1050 
1051 		/* add new interrupt at end of irq queue */
1052 		do {
1053 			p = &old->next;
1054 			old = *p;
1055 		} while (old);
1056 		shared = 1;
1057 	}
1058 
1059 	*p = new;
1060 
1061 	if (!shared) {
1062 		desc->depth = 0;
1063 		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
1064 		desc->handler->startup(irq);
1065 	}
1066 	spin_unlock_irqrestore(&desc->lock,flags);
1067 
1068 	register_irq_proc(irq);
1069 	return 0;
1070 }
1071 
1072 static struct proc_dir_entry * root_irq_dir;
1073 static struct proc_dir_entry * irq_dir [NR_IRQS];
1074 
1075 #define HEX_DIGITS 8
1076 
1077 static unsigned int parse_hex_value (const char *buffer,
1078 		unsigned long count, unsigned long *ret)
1079 {
1080 	unsigned char hexnum [HEX_DIGITS];
1081 	unsigned long value;
1082 	int i;
1083 
1084 	if (!count)
1085 		return -EINVAL;
1086 	if (count > HEX_DIGITS)
1087 		count = HEX_DIGITS;
1088 	if (copy_from_user(hexnum, buffer, count))
1089 		return -EFAULT;
1090 
1091 	/*
1092 	 * Parse the first 8 characters as a hex string, any non-hex char
1093 	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
1094 	 */
1095 	value = 0;
1096 
1097 	for (i = 0; i < count; i++) {
1098 		unsigned int c = hexnum[i];
1099 
1100 		switch (c) {
1101 			case '0' ... '9': c -= '0'; break;
1102 			case 'a' ... 'f': c -= 'a'-10; break;
1103 			case 'A' ... 'F': c -= 'A'-10; break;
1104 		default:
1105 			goto out;
1106 		}
1107 		value = (value << 4) | c;
1108 	}
1109 out:
1110 	*ret = value;
1111 	return 0;
1112 }
1113 
1114 #if CONFIG_SMP
1115 
1116 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
1117 
1118 static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
1119 static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
1120 
1121 void set_irq_affinity_info(int irq, int hwid, int redir)
1122 {
1123 	unsigned long mask = 1UL<<cpu_logical_id(hwid);
1124 
1125 	if (irq >= 0 && irq < NR_IRQS) {
1126 		irq_affinity[irq] = mask;
1127 		irq_redir[irq] = (char) (redir & 0xff);
1128 	}
1129 }
1130 
1131 static int irq_affinity_read_proc (char *page, char **start, off_t off,
1132 			int count, int *eof, void *data)
1133 {
1134 	if (count < HEX_DIGITS+3)
1135 		return -EINVAL;
1136 	return sprintf (page, "%s%08lx\n", irq_redir[(long)data] ? "r " : "",
1137 			irq_affinity[(long)data]);
1138 }
1139 
1140 static int irq_affinity_write_proc (struct file *file, const char *buffer,
1141 					unsigned long count, void *data)
1142 {
1143 	int irq = (long) data, full_count = count, err;
1144 	unsigned long new_value;
1145 	const char *buf = buffer;
1146 	int redir;
1147 
1148 	if (!irq_desc(irq)->handler->set_affinity)
1149 		return -EIO;
1150 
1151 	if (buf[0] == 'r' || buf[0] == 'R') {
1152 		++buf;
1153 		while (*buf == ' ') ++buf;
1154 		redir = 1;
1155 	} else
1156 		redir = 0;
1157 
1158 	err = parse_hex_value(buf, count, &new_value);
	if (err)
		return err;
1159 
1160 	/*
1161 	 * Do not allow disabling IRQs completely - it's too easy a
1162 	 * way to make the system unusable accidentally :-) At least
1163 	 * one online CPU still has to be targeted.
1164 	 */
1165 	if (!(new_value & cpu_online_map))
1166 		return -EINVAL;
1167 
1168 	irq_desc(irq)->handler->set_affinity(irq | (redir? IA64_IRQ_REDIRECTED :0), new_value);
1169 
1170 	return full_count;
1171 }
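/*
 * For illustration, these proc hooks are normally driven from user space
 * (the IRQ number and masks below are made up):
 *
 *	echo 3 > /proc/irq/41/smp_affinity	(allow IRQ 41 on CPUs 0-1)
 *	echo "r 1" > /proc/irq/41/smp_affinity	(redirectable hint, CPU 0)
 *	cat /proc/irq/41/smp_affinity		(prints: r 00000001)
 *
 * The leading 'r' selects IA64_IRQ_REDIRECTED in the write handler above.
 */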
1172 
1173 #endif /* CONFIG_SMP */
1174 
1175 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
1176 			int count, int *eof, void *data)
1177 {
1178 	unsigned long *mask = (unsigned long *) data;
1179 	if (count < HEX_DIGITS+1)
1180 		return -EINVAL;
1181 	return sprintf (page, "%08lx\n", *mask);
1182 }
1183 
1184 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
1185 					unsigned long count, void *data)
1186 {
1187 	unsigned long *mask = (unsigned long *) data, full_count = count, err;
1188 	unsigned long new_value;
1189 
1190 	err = parse_hex_value(buffer, count, &new_value);
1191 	if (err)
1192 		return err;
1193 
1194 	*mask = new_value;
1195 	return full_count;
1196 }
1197 
1198 #define MAX_NAMELEN 10
1199 
1200 static void register_irq_proc (unsigned int irq)
1201 {
1202 	char name [MAX_NAMELEN];
1203 
1204 	if (!root_irq_dir || (irq_desc(irq)->handler == &no_irq_type) || irq_dir[irq])
1205 		return;
1206 
1207 	memset(name, 0, MAX_NAMELEN);
1208 	sprintf(name, "%d", irq);
1209 
1210 	/* create /proc/irq/1234 */
1211 	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
1212 
1213 #if CONFIG_SMP
1214 	{
1215 		struct proc_dir_entry *entry;
1216 		/* create /proc/irq/1234/smp_affinity */
1217 		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
1218 
1219 		if (entry) {
1220 			entry->nlink = 1;
1221 			entry->data = (void *)(long)irq;
1222 			entry->read_proc = irq_affinity_read_proc;
1223 			entry->write_proc = irq_affinity_write_proc;
1224 		}
1225 
1226 		smp_affinity_entry[irq] = entry;
1227 	}
1228 #endif
1229 }
1230 
1231 unsigned long prof_cpu_mask = -1;
1232 
1233 void init_irq_proc (void)
1234 {
1235 	struct proc_dir_entry *entry;
1236 	int i;
1237 
1238 	/* create /proc/irq */
1239 	root_irq_dir = proc_mkdir("irq", 0);
1240 
1241 	/* create /proc/irq/prof_cpu_mask */
1242 	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
1243 
1244 	if (!entry)
1245 		return;
1246 
1247 	entry->nlink = 1;
1248 	entry->data = (void *)&prof_cpu_mask;
1249 	entry->read_proc = prof_cpu_mask_read_proc;
1250 	entry->write_proc = prof_cpu_mask_write_proc;
1251 
1252 	/*
1253 	 * Create entries for all existing IRQs.
1254 	 */
1255 	for (i = 0; i < NR_IRQS; i++) {
1256 		if (irq_desc(i)->handler == &no_irq_type)
1257 			continue;
1258 		register_irq_proc(i);
1259 	}
1260 }
1261