/*
 *  linux/arch/arm/kernel/irq.c
 *
 *  Copyright (C) 1992 Linus Torvalds
 *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains the code used by various IRQ handling routines:
 *  asking for different IRQs should be done through these routines
 *  instead of just grabbing them. Thus setups with different IRQ numbers
 *  shouldn't result in any weird surprises, and installing new handlers
 *  should be easier.
 *
 *  IRQs are in fact implemented a bit like signal handlers for the kernel.
 *  Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>

#include <asm/irq.h>
#include <asm/system.h>
#include <asm/mach/irq.h>

#include <asm/arch/irq.h>	/* pick up fixup_irq definition */

/*
 * Maximum IRQ count. Currently, this is arbitrary. However, it should
 * not be set too low to prevent false triggering. Conversely, if it
 * is set too high, then you could miss a stuck IRQ.
 *
 * Maybe we ought to set a timer and re-enable the IRQ at a later time?
 */
#define MAX_IRQ_CNT 100000

static volatile unsigned long irq_err_count;
static spinlock_t irq_controller_lock;
static LIST_HEAD(irq_pending);

struct irqdesc irq_desc[NR_IRQS];
void (*init_arch_irq)(void) __initdata = NULL;

/*
 * Dummy mask/unmask handler
 */
static void dummy_mask_unmask_irq(unsigned int irq)
{
}

/*
 * No architecture-specific irq_finish function defined in asm/arch/irq.h.
 */
#ifndef irq_finish
#define irq_finish(irq) do { } while (0)
#endif

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. We do this lazily.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (!desc->disable_depth++) {
#ifndef CONFIG_CPU_SA1100
		desc->mask(irq);
#endif
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line.
 *	Note that this may call the interrupt handler, so you may
 *	get unexpected results if you hold IRQs disabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (!desc->disable_depth) {
		printk("enable_irq(%u) unbalanced from %p\n", irq,
			__builtin_return_address(0));
	} else if (!--desc->disable_depth) {
		desc->probing = 0;
		desc->unmask(irq);

		/*
		 * If the interrupt is waiting to be processed,
		 * try to re-run it. We can't directly run it
		 * from here since the caller might be in an
		 * interrupt-protected region.
		 */
		if (desc->pending) {
			desc->pending = 0;
			if (list_empty(&desc->pend))
				list_add(&desc->pend, &irq_pending);
		}
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

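/*
 * Example: disable_irq()/enable_irq() calls nest via disable_depth,
 * so each disable must be balanced by exactly one enable. A minimal
 * sketch (MY_DEVICE_IRQ is a hypothetical IRQ number, not defined in
 * this file):
 *
 *	disable_irq(MY_DEVICE_IRQ);	-- first call masks the line
 *	disable_irq(MY_DEVICE_IRQ);	-- nested call only bumps the depth
 *	... access hardware with the line quiescent ...
 *	enable_irq(MY_DEVICE_IRQ);	-- depth 2 -> 1, still masked
 *	enable_irq(MY_DEVICE_IRQ);	-- depth 1 -> 0, line unmasked and
 *					   any pending IRQ is re-queued
 */
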
int get_irq_list(char *buf)
{
	int i;
	struct irqaction *action;
	char *p = buf;

	for (i = 0; i < NR_IRQS; i++) {
		action = irq_desc[i].action;
		if (!action)
			continue;
		p += sprintf(p, "%3d: %10u ", i, kstat_irqs(i));
		p += sprintf(p, " %s", action->name);
		for (action = action->next; action; action = action->next) {
			p += sprintf(p, ", %s", action->name);
		}
		*p++ = '\n';
	}

#ifdef CONFIG_ARCH_ACORN
	p += get_fiq_list(p);
#endif
	p += sprintf(p, "Err: %10lu\n", irq_err_count);
	return p - buf;
}

/*
 * IRQ lock detection.
 *
 * Hopefully, this should get us out of a few locked situations.
 * However, it may take a while for this to happen, since we need
 * a large number of IRQs to appear in the same jiffy with the
 * same instruction pointer (or within 2 instructions).
 */
static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
{
	unsigned long instr_ptr = instruction_pointer(regs);

	if (desc->lck_jif == jiffies &&
	    desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
		desc->lck_cnt += 1;

		if (desc->lck_cnt > MAX_IRQ_CNT) {
			if (!desc->lck_warned++)
				printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
			mod_timer(&desc->lck_timer, jiffies + 10*HZ);
			return 1;
		}
	} else {
		desc->lck_cnt = 0;
		desc->lck_pc = instr_ptr;
		desc->lck_jif = jiffies;
		if (desc->lck_warned < 0)
			desc->lck_warned++;
	}
	return 0;
}

static void
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
	unsigned int status;

	spin_unlock(&irq_controller_lock);

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	status = 0;
	do {
		status |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);

	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	spin_lock_irq(&irq_controller_lock);
}

/*
 * do_IRQ handles all normal device IRQs
 */
void do_IRQ(int irq, struct pt_regs *regs)
{
	struct irqdesc *desc = irq_desc + irq;

	desc->triggered = 1;

	/*
	 * Acknowledge and clear the IRQ, but (if it's
	 * a level-based IRQ) don't mask it.
	 */
	desc->mask_ack(irq);

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Instead, turn on the
	 * hardware masks.
	 */
	if (desc->running || desc->disable_depth)
		goto running;

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat.irqs[smp_processor_id()][irq]++;

	do {
		struct irqaction *action;

		action = desc->action;
		if (!action)
			break;

		if (desc->pending && desc->disable_depth == 0) {
			desc->pending = 0;
			desc->unmask(irq);
		}

		__do_irq(irq, action, regs);
	} while (desc->pending && desc->disable_depth == 0);

	desc->running = 0;

	/*
	 * Unmask the IRQ again, unless it has been freed in the
	 * meantime or is locking the system.
	 */
	if (desc->action && !check_irq_lock(desc, irq, regs))
		desc->unmask(irq);
	return;

running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running. Delay it.
	 */
	desc->pending = 1;
}

static void do_pending_irqs(struct pt_regs *regs)
{
	struct list_head head, *l, *n;

	do {
		struct irqdesc *desc;

		/*
		 * First, take the pending interrupts off the list.
		 * The act of calling the handlers may add some IRQs
		 * back onto the list.
		 */
		head = irq_pending;
		INIT_LIST_HEAD(&irq_pending);
		head.next->prev = &head;
		head.prev->next = &head;

		/*
		 * Now run each entry. We must delete it from our
		 * list before calling the handler.
		 */
		list_for_each_safe(l, n, &head) {
			desc = list_entry(l, struct irqdesc, pend);
			list_del_init(&desc->pend);
			do_IRQ(desc - irq_desc, regs);
		}

		/*
		 * The list must be empty.
		 */
		BUG_ON(!list_empty(&head));
	} while (!list_empty(&irq_pending));
}

/*
 * asm_do_IRQ is the entry point for all hardware IRQs. Decoded
 * IRQs should not come via this function; instead, they should
 * provide their own 'handler'.
 */
asmlinkage void asm_do_IRQ(int irq, struct pt_regs *regs)
{
	irq = fixup_irq(irq);

	/*
	 * Some hardware gives randomly wrong interrupts. Rather
	 * than crashing, do something sensible.
	 */
	if (irq < NR_IRQS) {
		int cpu = smp_processor_id();

		irq_enter(cpu, irq);
		spin_lock(&irq_controller_lock);
		do_IRQ(irq, regs);

		/*
		 * Now re-run any pending interrupts.
		 */
		if (!list_empty(&irq_pending))
			do_pending_irqs(regs);

		spin_unlock(&irq_controller_lock);
		irq_exit(cpu, irq);

		if (softirq_pending(cpu))
			do_softirq();

		irq_finish(irq);
		return;
	}

	irq_err_count += 1;
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);

	irq_finish(irq);
}

static void irqlck_timeout(unsigned long _data)
{
	struct irqdesc *desc = (struct irqdesc *)_data;

	spin_lock(&irq_controller_lock);

	del_timer(&desc->lck_timer);

	desc->lck_cnt = 0;
	desc->lck_pc = 0;
	desc->lck_jif = 0;
	desc->lck_warned = -10;

	if (desc->disable_depth == 0)
		desc->unmask(desc - irq_desc);

	spin_unlock(&irq_controller_lock);
}

#ifdef CONFIG_ARCH_ACORN
void do_ecard_IRQ(int irq, struct pt_regs *regs)
{
	struct irqdesc *desc;
	struct irqaction *action;
	int cpu;

	desc = irq_desc + irq;

	cpu = smp_processor_id();
	kstat.irqs[cpu][irq]++;
	desc->triggered = 1;

	action = desc->action;

	if (action) {
		do {
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);
	} else {
		spin_lock(&irq_controller_lock);
		desc->mask(irq);
		spin_unlock(&irq_controller_lock);
	}
}
#endif

int setup_arm_irq(int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	struct irqdesc *desc;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it
		 * first, outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is loaded without actually installing a new
		 * handler, but that is hardly a problem: only the
		 * sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->probing = 0;
		desc->running = 0;
		desc->pending = 0;
		desc->disable_depth = 1;
		if (!desc->noautoenable) {
			desc->disable_depth = 0;
			desc->unmask(irq);
		}
	}

	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}

/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irq_flags: Interrupt type flags
 *	@devname: An ASCII name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non-NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *
 *	SA_INTERRUPT		Disable local interrupts while processing
 *
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
		unsigned long irq_flags, const char *devname, void *dev_id)
{
	int retval;
	struct irqaction *action;

	if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
	    (irq_flags & SA_SHIRQ && !dev_id))
		return -EINVAL;

	action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irq_flags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_arm_irq(irq, action);

	if (retval)
		kfree(action);
	return retval;
}

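/*
 * Example usage, as a sketch only: the device structure, handler body,
 * IRQ number and names below are hypothetical, not part of this file.
 *
 *	static void mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct mydev *dev = dev_id;
 *		... acknowledge the device and do the work ...
 *	}
 *
 *	if (request_irq(MYDEV_IRQ, mydev_interrupt, SA_SHIRQ,
 *			"mydev", dev))
 *		printk(KERN_ERR "mydev: could not claim IRQ%d\n", MYDEV_IRQ);
 *
 * Because the line is enabled as soon as request_irq() returns, the
 * hardware must already be in a state where mydev_interrupt() can
 * safely run.
 */
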
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free invalid IRQ%d\n", irq);
#ifdef CONFIG_DEBUG_ERRORS
		__backtrace();
#endif
		return;
	}

	spin_lock_irqsave(&irq_controller_lock, flags);
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		kfree(action);
		goto out;
	}
	printk(KERN_ERR "Trying to free already-free IRQ%d\n", irq);
#ifdef CONFIG_DEBUG_ERRORS
	__backtrace();
#endif
out:
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

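/*
 * Example, matching the hypothetical request_irq() sketch above: the
 * same dev_id cookie must be passed so the right handler is removed
 * from a shared line.
 *
 *	free_irq(MYDEV_IRQ, dev);
 */
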
static DECLARE_MUTEX(probe_sem);

/* Start the interrupt probing. Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing are
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	down(&probe_sem);

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (!irq_desc[i].probe_ok || irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		irq_desc[i].unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing && irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return irqs;
}

unsigned int probe_irq_mask(unsigned long irqs)
{
	unsigned int mask = 0, i;

	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < 16 && i < NR_IRQS; i++)
		if (irq_desc[i].probing && irq_desc[i].triggered)
			mask |= 1 << i;
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);

	return mask;
}

/*
 * Possible return values:
 *  >= 0 - interrupt number
 *    -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
	unsigned int i;
	int irq_found = NO_IRQ;

	/*
	 * look at the interrupts, and find exactly one interrupt
	 * that we were probing and that has been triggered
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			if (irq_found != NO_IRQ) {
				irq_found = NO_IRQ;
				goto out;
			}
			irq_found = i;
		}
	}

	if (irq_found == -1)
		irq_found = NO_IRQ;
out:
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);

	return irq_found;
}

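/*
 * Example of the autoprobe sequence, as a sketch: the device poke in
 * the middle is hypothetical and depends entirely on the hardware.
 *
 *	unsigned long probe_count;
 *	int irq;
 *
 *	probe_count = probe_irq_on();
 *	... program the device so that it raises its interrupt ...
 *	irq = probe_irq_off(probe_count);
 *	if (irq == NO_IRQ)
 *		printk(KERN_WARNING "probe failed or multiple IRQs\n");
 *
 * Note that, unlike other architectures, probe_irq_on() returns a
 * count of probed interrupts rather than a mask.
 */
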
void __init init_irq_proc(void)
{
}

void __init init_IRQ(void)
{
	extern void init_dma(void);
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		irq_desc[irq].disable_depth = 1;
		irq_desc[irq].probe_ok = 0;
		irq_desc[irq].valid = 0;
		irq_desc[irq].noautoenable = 0;
		irq_desc[irq].mask_ack = dummy_mask_unmask_irq;
		irq_desc[irq].mask = dummy_mask_unmask_irq;
		irq_desc[irq].unmask = dummy_mask_unmask_irq;
		INIT_LIST_HEAD(&irq_desc[irq].pend);
		init_timer(&irq_desc[irq].lck_timer);
		irq_desc[irq].lck_timer.data = (unsigned long)&irq_desc[irq];
		irq_desc[irq].lck_timer.function = irqlck_timeout;
	}

	init_arch_irq();
	init_dma();
}