/* $Id: irq.c,v 1.113 2001/07/17 16:17:33 anton Exp $
 * arch/sparc/kernel/irq.c:  Interrupt request handling routines. On the
 *                           Sparc the IRQ's are basically 'cast in stone'
 *                           and you are supposed to probe the prom's device
 *                           node trees to find out who's got which IRQ.
 *
 *  Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 *  Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *  Copyright (C) 1995 Pete A. Zaitcev (zaitcev@yahoo.com)
 *  Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
 *  Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/vaddrs.h>
#include <asm/timer.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
#include <asm/pcic.h>
/*
 * Dave Redman (djhr@tadpole.co.uk)
 *
 * IRQ numbers.. These are no longer restricted to 15.
 *
 * This is done to enable SBUS cards and onboard IO to be masked
 * correctly; using the interrupt level alone isn't good enough.
 *
 * For example:
 *   A device interrupting at sbus level6 and the Floppy both come in
 *   at IRQ11, but enabling and disabling them requires writing to
 *   different bits in the SLAVIO/SEC.
 *
 * As a result of these changes, sun4m machines can now support
 * directed CPU interrupts using the existing enable/disable irq code
 * with tweaks.
 */
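
/* A hedged illustration of the point above (the names are hypothetical,
 * not the real SLAVIO bit assignments): two devices can share processor
 * interrupt level 11 yet need independent masking, so each gets its own
 * IRQ number and the enable/disable code maps that number to the right
 * per-device enable bit:
 *
 *      disable_irq(FLOPPY_IRQ);        // clears the floppy enable bit
 *      disable_irq(SBUS_L6_DEV_IRQ);   // clears the sbus level-6 bit
 *
 * Both still arrive at PIL 11; only the per-device enables differ.
 */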

static void irq_panic(void)
{
        extern char *cputypval;

        prom_printf("machine: %s doesn't have irq handlers defined!\n", cputypval);
        prom_halt();
}

void (*sparc_init_timers)(void (*)(int, void *, struct pt_regs *)) =
        (void (*)(void (*)(int, void *, struct pt_regs *))) irq_panic;

/*
 * Dave Redman (djhr@tadpole.co.uk)
 *
 * There used to be extern calls and hard coded values here.. very sucky!
 * Instead, because some of the devices attach very early, I do something
 * equally sucky but at least we'll never try to free statically allocated
 * space or call kmalloc before kmalloc_init :(.
 *
 * In fact it's the timer10 that attaches first.. then timer14,
 * then kmalloc_init is called.. then the tty interrupts attach.
 * hmmm....
 */
#define MAX_STATIC_ALLOC        4
struct irqaction static_irqaction[MAX_STATIC_ALLOC];
int static_irq_count;
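
/* A minimal sketch of why the static pool exists (the timer attach shown
 * is hypothetical, not lifted from a real board file): a device that
 * registers its handler before kmalloc_init() must pass SA_STATIC_ALLOC
 * so its irqaction comes from static_irqaction[] rather than the not yet
 * usable allocator:
 *
 *      // early timer attach, runs before kmalloc_init()
 *      request_irq(TIMER_IRQ, timer_handler,
 *                  SA_INTERRUPT | SA_STATIC_ALLOC, "timer", NULL);
 *
 * Only MAX_STATIC_ALLOC such registrations are available; later ones
 * fall back to kmalloc() (see request_irq() below).
 */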

struct irqaction *irq_action[NR_IRQS] = {
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};

int get_irq_list(char *buf)
{
        int i, len = 0;
        struct irqaction *action;
#ifdef CONFIG_SMP
        int j;
#endif

        if (sparc_cpu_model == sun4d) {
                extern int sun4d_get_irq_list(char *);

                return sun4d_get_irq_list(buf);
        }
        for (i = 0; i < NR_IRQS; i++) {
                action = *(i + irq_action);
                if (!action)
                        continue;
                len += sprintf(buf+len, "%3d: ", i);
#ifndef CONFIG_SMP
                len += sprintf(buf+len, "%10u ", kstat_irqs(i));
#else
                for (j = 0; j < smp_num_cpus; j++)
                        len += sprintf(buf+len, "%10u ",
                                       kstat.irqs[cpu_logical_map(j)][i]);
#endif
                len += sprintf(buf+len, " %c %s",
                               (action->flags & SA_INTERRUPT) ? '+' : ' ',
                               action->name);
                for (action = action->next; action; action = action->next) {
                        len += sprintf(buf+len, ",%s %s",
                                       (action->flags & SA_INTERRUPT) ? " +" : "",
                                       action->name);
                }
                len += sprintf(buf+len, "\n");
        }
        return len;
}

void free_irq(unsigned int irq, void *dev_id)
{
        struct irqaction *action;
        struct irqaction *tmp = NULL;
        unsigned long flags;
        unsigned int cpu_irq;

        if (sparc_cpu_model == sun4d) {
                extern void sun4d_free_irq(unsigned int, void *);

                return sun4d_free_irq(irq, dev_id);
        }
        cpu_irq = irq & (NR_IRQS - 1);
        action = *(cpu_irq + irq_action);
        if (cpu_irq > 14) {  /* 14 irq levels on the sparc */
                printk("Trying to free bogus IRQ %d\n", irq);
                return;
        }
        if (!action || !action->handler) {
                printk("Trying to free free IRQ%d\n", irq);
                return;
        }
        if (dev_id) {
                for (; action; action = action->next) {
                        if (action->dev_id == dev_id)
                                break;
                        tmp = action;
                }
                if (!action) {
                        printk("Trying to free free shared IRQ%d\n", irq);
                        return;
                }
        } else if (action->flags & SA_SHIRQ) {
                printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
                return;
        }
        if (action->flags & SA_STATIC_ALLOC) {
                /* This interrupt is marked as specially allocated
                 * so it is a bad idea to free it.
                 */
                printk("Attempt to free statically allocated IRQ%d (%s)\n",
                       irq, action->name);
                return;
        }

        save_and_cli(flags);
        if (action && tmp)
                tmp->next = action->next;
        else
                *(cpu_irq + irq_action) = action->next;

        kfree(action);

        if (!(*(cpu_irq + irq_action)))
                disable_irq(irq);

        restore_flags(flags);
}

#ifdef CONFIG_SMP

/* Who has the global irq brlock */
unsigned char global_irq_holder = NO_PROC_ID;

void smp_show_backtrace_all_cpus(void);
void show_backtrace(void);

#define VERBOSE_DEBUG_IRQLOCK
#define MAXCOUNT 100000000

static void show(char *str)
{
        int cpu = smp_processor_id();
        int i;

        printk("\n%s, CPU %d:\n", str, cpu);
        printk("irq: %d [ ", irqs_running());
        for (i = 0; i < smp_num_cpus; i++)
                printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
        printk("]\nbh: %d [ ",
               (spin_is_locked(&global_bh_lock) ? 1 : 0));
        for (i = 0; i < smp_num_cpus; i++)
                printk("%u ", local_bh_count(i));
        printk("]\n");

#ifdef VERBOSE_DEBUG_IRQLOCK
        smp_show_backtrace_all_cpus();
#else
        show_backtrace();
#endif
}

/*
 * We have to allow irqs to arrive between __sti and __cli
 */
#define SYNC_OTHER_CORES(x) barrier()

/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
        if (irqs_running()) {
                cli();
                sti();
        }
}
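
/* A hedged usage sketch (the device and its register are hypothetical):
 * a driver that wants to be sure no handler is still running on another
 * CPU after it masks its device would do something like
 *
 *      writel(0, dev->regs + IRQ_ENABLE);  // tell device to stop interrupting
 *      synchronize_irq();                  // wait out handlers on other CPUs
 *      // now it is safe to free buffers the handler touches
 */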

static inline void get_irqlock(int cpu)
{
        int count;

        if ((unsigned char)cpu == global_irq_holder)
                return;

        count = MAXCOUNT;
again:
        br_write_lock(BR_GLOBALIRQ_LOCK);
        for (;;) {
                spinlock_t *lock;

                if (!irqs_running() &&
                    (local_bh_count(smp_processor_id()) || !spin_is_locked(&global_bh_lock)))
                        break;

                br_write_unlock(BR_GLOBALIRQ_LOCK);
                lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
                while (irqs_running() ||
                       spin_is_locked(lock) ||
                       (!local_bh_count(smp_processor_id()) && spin_is_locked(&global_bh_lock))) {
                        if (!--count) {
                                show("get_irqlock");
                                count = (~0 >> 1);
                        }
                        __sti();
                        SYNC_OTHER_CORES(cpu);
                        __cli();
                }
                goto again;
        }

        global_irq_holder = cpu;
}

/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
        unsigned long flags;

        __save_flags(flags);

        if ((flags & PSR_PIL) != PSR_PIL) {
                int cpu = smp_processor_id();
                __cli();
                if (!local_irq_count(cpu))
                        get_irqlock(cpu);
        }
}

void __global_sti(void)
{
        int cpu = smp_processor_id();

        if (!local_irq_count(cpu))
                release_irqlock(cpu);
        __sti();
}

/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
        unsigned long flags, retval;
        unsigned long local_enabled = 0;

        __save_flags(flags);

        if ((flags & PSR_PIL) != PSR_PIL)
                local_enabled = 1;

        /* default to local */
        retval = 2 + local_enabled;

        /* check for global flags if we're not in an interrupt */
        if (!local_irq_count(smp_processor_id())) {
                if (local_enabled)
                        retval = 1;
                if (global_irq_holder == (unsigned char) smp_processor_id())
                        retval = 0;
        }
        return retval;
}

void __global_restore_flags(unsigned long flags)
{
        switch (flags) {
        case 0:
                __global_cli();
                break;
        case 1:
                __global_sti();
                break;
        case 2:
                __cli();
                break;
        case 3:
                __sti();
                break;
        default:
        {
                unsigned long pc;
                __asm__ __volatile__("mov %%i7, %0" : "=r" (pc));
                printk("global_restore_flags: Bogon flags(%08lx) caller %08lx\n",
                       flags, pc);
        }
        }
}
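
/* A hedged sketch of how the four flag values above are produced and
 * consumed: on SMP builds, save_flags()/cli()/restore_flags() map to the
 * __global_* routines, so the classic driver-side critical section
 *
 *      unsigned long flags;
 *
 *      save_flags(flags);      // __global_save_flags(): returns 0..3
 *      cli();                  // __global_cli(): may take the irq brlock
 *      // ... touch state shared with interrupt handlers ...
 *      restore_flags(flags);   // __global_restore_flags(flags)
 *
 * restores exactly the global-vs-local, enabled-vs-disabled state that
 * was in force before, per the 0..3 encoding documented above.
 */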

#endif /* CONFIG_SMP */

void unexpected_irq(int irq, void *dev_id, struct pt_regs *regs)
{
        struct irqaction *action;
        unsigned int cpu_irq;

        cpu_irq = irq & (NR_IRQS - 1);
        action = *(cpu_irq + irq_action);

        printk("IO device interrupt, irq = %d\n", irq);
        printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
               regs->npc, regs->u_regs[14]);
        if (action) {
                printk("Expecting: ");
                /* Walk the chain of handlers registered at this level. */
                for (; action; action = action->next)
                        if (action->handler)
                                printk("[%s:0x%x] ", action->name,
                                       (unsigned int) action->handler);
        }
        printk("AIEEE\n");
        panic("bogus interrupt received");
}

void handler_irq(int irq, struct pt_regs *regs)
{
        struct irqaction *action;
        int cpu = smp_processor_id();
#ifdef CONFIG_SMP
        extern void smp4m_irq_rotate(int cpu);
#endif

        irq_enter(cpu, irq);
        disable_pil_irq(irq);
#ifdef CONFIG_SMP
        /* Only rotate on lower priority IRQ's (scsi, ethernet, etc.). */
        if (irq < 10)
                smp4m_irq_rotate(cpu);
#endif
        action = *(irq + irq_action);
        kstat.irqs[cpu][irq]++;
        do {
                if (!action || !action->handler)
                        unexpected_irq(irq, 0, regs); /* panics, does not return */
                action->handler(irq, action->dev_id, regs);
                action = action->next;
        } while (action);
        enable_pil_irq(irq);
        irq_exit(cpu, irq);
        if (softirq_pending(cpu))
                do_softirq();
}

#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_id, struct pt_regs *regs);

void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        disable_pil_irq(irq);
        irq_enter(cpu, irq);
        kstat.irqs[cpu][irq]++;
        floppy_interrupt(irq, dev_id, regs);
        irq_exit(cpu, irq);
        enable_pil_irq(irq);
        if (softirq_pending(cpu))
                do_softirq();
}
#endif

/* Fast IRQ's on the Sparc can only have one routine attached to them,
 * thus no sharing possible.
 */
int request_fast_irq(unsigned int irq,
                     void (*handler)(int, void *, struct pt_regs *),
                     unsigned long irqflags, const char *devname)
{
        struct irqaction *action;
        unsigned long flags;
        unsigned int cpu_irq;
#ifdef CONFIG_SMP
        struct tt_entry *trap_table;
        extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
#endif

        cpu_irq = irq & (NR_IRQS - 1);
        if (cpu_irq > 14)
                return -EINVAL;
        if (!handler)
                return -EINVAL;
        action = *(cpu_irq + irq_action);
        if (action) {
                if (action->flags & SA_SHIRQ)
                        panic("Trying to register fast irq when already shared.\n");
                if (irqflags & SA_SHIRQ)
                        panic("Trying to register fast irq as shared.\n");

                /* Anyway, someone already owns it so cannot be made fast. */
                printk("request_fast_irq: IRQ%d already owned, cannot make it fast.\n", irq);
                return -EBUSY;
        }

        save_and_cli(flags);

        /* If this is flagged as statically allocated then we use our
         * private struct which is never freed.
         */
        if (irqflags & SA_STATIC_ALLOC) {
                if (static_irq_count < MAX_STATIC_ALLOC)
                        action = &static_irqaction[static_irq_count++];
                else
                        printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed, using kmalloc\n",
                               irq, devname);
        }

        if (action == NULL)
                action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
                                                     GFP_ATOMIC); /* irqs are off, must not sleep */

        if (!action) {
                restore_flags(flags);
                return -ENOMEM;
        }
        /* Dork with trap table if we get this far. */
#define INSTANTIATE(table) \
        table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
        table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
                SPARC_BRANCH((unsigned long) handler, \
                             (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two); \
        table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
        table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
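
        /* What INSTANTIATE() writes, for reference: each sparc32 trap
         * table entry is four instructions, and the patched slot becomes
         * roughly (the mnemonics are a sketch of what the SPARC_* macros
         * encode):
         *
         *      rd  %psr, %l0           ! SPARC_RD_PSR_L0
         *      b   <handler>           ! SPARC_BRANCH, PC-relative
         *      rd  %wim, %l3           ! SPARC_RD_WIM_L3, in the delay slot
         *      nop                     ! SPARC_NOP
         *
         * so the fast handler is entered directly from the trap vector.
         */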

        INSTANTIATE(sparc_ttable)
#ifdef CONFIG_SMP
        trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
        trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
        trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
#endif
#undef INSTANTIATE
        /*
         * XXX Correct thing would be to flush only I- and D-cache lines
         * which contain the handler in question. But as of time of the
         * writing we have no CPU-neutral interface to fine-grained flushes.
         */
        flush_cache_all();

        action->handler = handler;
        action->flags = irqflags;
        action->mask = 0;
        action->name = devname;
        action->dev_id = NULL;
        action->next = NULL;

        *(cpu_irq + irq_action) = action;

        enable_irq(irq);
        restore_flags(flags);
        return 0;
}
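
/* A hedged usage sketch (the names are hypothetical): a latency-critical
 * device with a hand-written trap-time handler would register it with
 *
 *      extern void fd_fast_handler(int irq, void *dev_id,
 *                                  struct pt_regs *regs);
 *
 *      if (request_fast_irq(FLOPPY_IRQ, fd_fast_handler,
 *                           SA_INTERRUPT, "floppy"))
 *              printk("floppy: fast IRQ registration failed\n");
 *
 * Note there is no dev_id and no sharing: the handler is branched to
 * straight from the trap table, bypassing handler_irq() entirely.
 */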

int request_irq(unsigned int irq,
                void (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags, const char *devname, void *dev_id)
{
        struct irqaction *action, *tmp = NULL;
        unsigned long flags;
        unsigned int cpu_irq;

        if (sparc_cpu_model == sun4d) {
                extern int sun4d_request_irq(unsigned int,
                                             void (*)(int, void *, struct pt_regs *),
                                             unsigned long, const char *, void *);
                return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
        }
        cpu_irq = irq & (NR_IRQS - 1);
        if (cpu_irq > 14)
                return -EINVAL;

        if (!handler)
                return -EINVAL;

        action = *(cpu_irq + irq_action);
        if (action) {
                if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
                        for (tmp = action; tmp->next; tmp = tmp->next)
                                ;
                } else {
                        return -EBUSY;
                }
                if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
                        printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
                        return -EBUSY;
                }
                action = NULL; /* Or else! */
        }

        save_and_cli(flags);

        /* If this is flagged as statically allocated then we use our
         * private struct which is never freed.
         */
        if (irqflags & SA_STATIC_ALLOC) {
                if (static_irq_count < MAX_STATIC_ALLOC)
                        action = &static_irqaction[static_irq_count++];
                else
                        printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed, using kmalloc\n",
                               irq, devname);
        }

        if (action == NULL)
                action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
                                                     GFP_ATOMIC); /* irqs are off, must not sleep */

        if (!action) {
                restore_flags(flags);
                return -ENOMEM;
        }

        action->handler = handler;
        action->flags = irqflags;
        action->mask = 0;
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

        if (tmp)
                tmp->next = action;
        else
                *(cpu_irq + irq_action) = action;

        enable_irq(irq);
        restore_flags(flags);
        return 0;
}
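
/* A hedged usage sketch (the names are hypothetical): an ordinary shared,
 * slow handler pairs request_irq() with free_irq() using the same dev_id
 * cookie:
 *
 *      static void mydev_intr(int irq, void *dev_id, struct pt_regs *regs)
 *      {
 *              struct mydev *dp = dev_id;
 *              // acknowledge and service the device
 *      }
 *
 *      if (request_irq(dp->irq, mydev_intr, SA_SHIRQ, "mydev", dp))
 *              return -EAGAIN;
 *      ...
 *      free_irq(dp->irq, dp);  // dev_id selects us on the shared chain
 */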

/* We really don't need these at all on the Sparc. We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
        return 0;
}

int probe_irq_off(unsigned long mask)
{
        return 0;
}

/* djhr
 * This could probably be made indirect too and assigned in the CPU
 * bits of the code. That would be much nicer I think and would also
 * fit in with the idea of being able to tune your kernel for your machine
 * by removing unrequired machine and device support.
 */

void __init init_IRQ(void)
{
        extern void sun4c_init_IRQ(void);
        extern void sun4m_init_IRQ(void);
        extern void sun4d_init_IRQ(void);

        switch (sparc_cpu_model) {
        case sun4c:
        case sun4:
                sun4c_init_IRQ();
                break;

        case sun4m:
#ifdef CONFIG_PCI
                pcic_probe();
                if (pcic_present()) {
                        sun4m_pci_init_IRQ();
                        break;
                }
#endif
                sun4m_init_IRQ();
                break;

        case sun4d:
                sun4d_init_IRQ();
                break;

        default:
                prom_printf("Cannot initialize IRQ's on this Sun machine...");
                break;
        }
        btfixup();
}

void init_irq_proc(void)
{
        /* For now, nothing... */
}