/* $Id: sun4d_irq.c,v 1.28 2001/07/17 16:17:33 anton Exp $
 * arch/sparc/kernel/sun4d_irq.c:
 *			SS1000/SC2000 interrupt handling.
 *
 *  Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *  Heavily based on arch/sparc/kernel/irq.c.
 */

#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/vaddrs.h>
#include <asm/timer.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/sbi.h>

/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
/* #define DISTRIBUTE_IRQS */

struct sun4d_timer_regs *sun4d_timers;
#define TIMER_IRQ	10

#define MAX_STATIC_ALLOC	4
extern struct irqaction static_irqaction[MAX_STATIC_ALLOC];
extern int static_irq_count;
unsigned char cpu_leds[32];
#ifdef CONFIG_SMP
unsigned char sbus_tid[32];
#endif

extern struct irqaction *irq_action[];

struct sbus_action {
	struct irqaction *action;
	/* For SMP this needs to be extended */
} *sbus_actions;
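
/*
 * sbus_actions is a flat table of handler lists for SBUS interrupt
 * sources, indexed by ((sbi << 5) + (sbus_level << 2) + slot); it is
 * allocated in sun4d_init_sbi_irq() below.  The two tables that follow
 * translate between the CPU priority level (PIL) used by the generic
 * IRQ code and the SBUS interrupt level (1-7) used by the SBI.
 */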

static int pil_to_sbus[] = {
	0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
};

static int sbus_to_pil[] = {
	0, 2, 3, 5, 7, 9, 11, 13,
};

static int nsbi;
#ifdef CONFIG_SMP
spinlock_t sun4d_imsk_lock = SPIN_LOCK_UNLOCKED;
#endif

int sun4d_get_irq_list(char *buf)
{
	int i, j = 0, k = 0, len = 0, sbusl;
	struct irqaction * action;
#ifdef CONFIG_SMP
	int x;
#endif

	for (i = 0 ; i < NR_IRQS ; i++) {
		sbusl = pil_to_sbus[i];
		if (!sbusl) {
			action = *(i + irq_action);
			if (!action)
				continue;
		} else {
			for (j = 0; j < nsbi; j++) {
				for (k = 0; k < 4; k++)
					if ((action = sbus_actions[(j << 5) + (sbusl << 2) + k].action))
						goto found_it;
			}
			continue;
		}
found_it:	len += sprintf(buf+len, "%3d: ", i);
#ifndef CONFIG_SMP
		len += sprintf(buf+len, "%10u ", kstat_irqs(i));
#else
		for (x = 0; x < smp_num_cpus; x++)
			len += sprintf(buf+len, "%10u ",
				       kstat.irqs[cpu_logical_map(x)][i]);
#endif
		len += sprintf(buf+len, "%c %s",
			(action->flags & SA_INTERRUPT) ? '+' : ' ',
			action->name);
		action = action->next;
		for (;;) {
			for (; action; action = action->next) {
				len += sprintf(buf+len, ",%s %s",
					(action->flags & SA_INTERRUPT) ? " +" : "",
					action->name);
			}
			if (!sbusl) break;
			k++;
			if (k < 4)
				action = sbus_actions[(j << 5) + (sbusl << 2) + k].action;
			else {
				j++;
				if (j == nsbi) break;
				k = 0;
				action = sbus_actions[(j << 5) + (sbusl << 2)].action;
			}
		}
		len += sprintf(buf+len, "\n");
	}
	return len;
}

void sun4d_free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action, **actionp;
	struct irqaction *tmp = NULL;
	unsigned long flags;

	if (irq < 15)
		actionp = irq + irq_action;
	else
		actionp = &(sbus_actions[irq - (1 << 5)].action);
	action = *actionp;
	if (!action) {
		printk("Trying to free free IRQ%d\n", irq);
		return;
	}
	if (dev_id) {
		for (; action; action = action->next) {
			if (action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if (!action) {
			printk("Trying to free free shared IRQ%d\n", irq);
			return;
		}
	} else if (action->flags & SA_SHIRQ) {
		printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
		return;
	}
	if (action->flags & SA_STATIC_ALLOC) {
		/* This interrupt is marked as specially allocated
		 * so it is a bad idea to free it.
		 */
		printk("Attempt to free statically allocated IRQ%d (%s)\n",
		       irq, action->name);
		return;
	}

	save_and_cli(flags);
	if (action && tmp)
		tmp->next = action->next;
	else
		*actionp = action->next;

	kfree(action);

	if (!(*actionp))
		disable_irq(irq);

	restore_flags(flags);
}

extern void unexpected_irq(int, void *, struct pt_regs *);

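/*
 * Main interrupt dispatch.  The pending level is acknowledged in the
 * cache controller (cc_set_iclr) first.  CPU-level sources are run
 * straight from irq_action[]; for SBUS levels the pending-SBI mask is
 * read with bw_get_intr_mask(), and each pending SBI and each pending
 * slot within it is walked, running the registered handlers and
 * releasing the slot afterwards.
 */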
void sun4d_handler_irq(int irq, struct pt_regs * regs)
{
	struct irqaction * action;
	int cpu = smp_processor_id();
	/* SBUS IRQ level (1 - 7) */
	int sbusl = pil_to_sbus[irq];

	/* FIXME: Is this necessary?? */
	cc_get_ipen();

	cc_set_iclr(1 << irq);

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;
	if (!sbusl) {
		action = *(irq + irq_action);
		if (!action)
			unexpected_irq(irq, 0, regs);
		do {
			action->handler(irq, action->dev_id, regs);
			action = action->next;
		} while (action);
	} else {
		int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
		int sbino;
		struct sbus_action *actionp;
		unsigned mask, slot;
		int sbil = (sbusl << 2);

		bw_clear_intr_mask(sbusl, bus_mask);

		/* Loop for each pending SBI */
		for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1)
			if (bus_mask & 1) {
				mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
				mask &= (0xf << sbil);
				actionp = sbus_actions + (sbino << 5) + (sbil);
				/* Loop for each pending SBI slot */
				for (slot = (1 << sbil); mask; slot <<= 1, actionp++)
					if (mask & slot) {
						mask &= ~slot;
						action = actionp->action;

						if (!action)
							unexpected_irq(irq, 0, regs);
						do {
							action->handler(irq, action->dev_id, regs);
							action = action->next;
						} while (action);
						release_sbi(SBI2DEVID(sbino), slot);
					}
			}
	}
	irq_exit(cpu, irq);
	if (softirq_pending(cpu))
		do_softirq();
}

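/*
 * For SBUS devices the IRQ handed back to drivers is an encoded cookie,
 * ((board + 1) << 5) + (sbus_level << 2) + slot, so it is always >= 32
 * and can be told apart from a plain CPU PIL (0-15) by
 * sun4d_request_irq() and sun4d_free_irq().
 */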
unsigned int sun4d_build_irq(struct sbus_dev *sdev, int irq)
{
	int sbusl = pil_to_sbus[irq];

	if (sbusl)
		return ((sdev->bus->board + 1) << 5) + (sbusl << 2) + sdev->slot;
	else
		return irq;
}

unsigned int sun4d_sbint_to_irq(struct sbus_dev *sdev, unsigned int sbint)
{
	if (sbint >= sizeof(sbus_to_pil) / sizeof(sbus_to_pil[0])) {
		printk(KERN_ERR "%s: bogus SBINT %d\n", sdev->prom_name, sbint);
		BUG();
	}
	return sun4d_build_irq(sdev, sbus_to_pil[sbint]);
}

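/*
 * irq here is either a plain PIL (0-14), which indexes irq_action[]
 * directly, or an SBUS cookie from sun4d_build_irq() (>= 32), which
 * indexes sbus_actions[]; values in between are rejected.
 */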
int sun4d_request_irq(unsigned int irq,
		void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char * devname, void *dev_id)
{
	struct irqaction *action, *tmp = NULL, **actionp;
	unsigned long flags;

	if (irq > 14 && irq < (1 << 5))
		return -EINVAL;

	if (!handler)
		return -EINVAL;

	if (irq >= (1 << 5))
		actionp = &(sbus_actions[irq - (1 << 5)].action);
	else
		actionp = irq + irq_action;
	action = *actionp;

	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
			for (tmp = action; tmp->next; tmp = tmp->next);
		} else {
			return -EBUSY;
		}
		if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
			printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
			return -EBUSY;
		}
		action = NULL;		/* Or else! */
	}

	save_and_cli(flags);

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
	}

	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_KERNEL);

	if (!action) {
		restore_flags(flags);
		return -ENOMEM;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	if (tmp)
		tmp->next = action;
	else
		*actionp = action;

	enable_irq(irq);
	restore_flags(flags);
	return 0;
}

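/*
 * Only SBUS cookies (>= NR_IRQS) are masked here: the SBUS level is
 * recovered from the cookie and the corresponding PIL bit is set or
 * cleared in the interrupt mask of the CPU serving that board
 * (sbus_tid[] under SMP, the local mask otherwise).  Plain CPU-level
 * IRQs are left alone.
 */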
static void sun4d_disable_irq(unsigned int irq)
{
#ifdef CONFIG_SMP
	int tid = sbus_tid[(irq >> 5) - 1];
	unsigned long flags;
#endif

	if (irq < NR_IRQS)
		return;
#ifdef CONFIG_SMP
	spin_lock_irqsave(&sun4d_imsk_lock, flags);
	cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7]));
	spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
#else
	cc_set_imsk(cc_get_imsk() | (1 << sbus_to_pil[(irq >> 2) & 7]));
#endif
}

static void sun4d_enable_irq(unsigned int irq)
{
#ifdef CONFIG_SMP
	int tid = sbus_tid[(irq >> 5) - 1];
	unsigned long flags;
#endif

	if (irq < NR_IRQS)
		return;
#ifdef CONFIG_SMP
	spin_lock_irqsave(&sun4d_imsk_lock, flags);
	cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
	spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
#else
	cc_set_imsk(cc_get_imsk() & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
#endif
}

#ifdef CONFIG_SMP
static void sun4d_set_cpu_int(int cpu, int level)
{
	sun4d_send_ipi(cpu, level);
}

static void sun4d_clear_ipi(int cpu, int level)
{
}

static void sun4d_set_udt(int cpu)
{
}

/* Setup IRQ distribution scheme. */
void __init sun4d_distribute_irqs(void)
{
#ifdef DISTRIBUTE_IRQS
	struct sbus_bus *sbus;
	unsigned long sbus_serving_map;

	sbus_serving_map = cpu_present_map;
	for_each_sbus(sbus) {
		if ((sbus->board * 2) == boot_cpu_id && (cpu_present_map & (1 << (sbus->board * 2 + 1))))
			sbus_tid[sbus->board] = (sbus->board * 2 + 1);
		else if (cpu_present_map & (1 << (sbus->board * 2)))
			sbus_tid[sbus->board] = (sbus->board * 2);
		else if (cpu_present_map & (1 << (sbus->board * 2 + 1)))
			sbus_tid[sbus->board] = (sbus->board * 2 + 1);
		else
			sbus_tid[sbus->board] = 0xff;
		if (sbus_tid[sbus->board] != 0xff)
			sbus_serving_map &= ~(1 << sbus_tid[sbus->board]);
	}
	for_each_sbus(sbus)
		if (sbus_tid[sbus->board] == 0xff) {
			int i = 31;

			if (!sbus_serving_map)
				sbus_serving_map = cpu_present_map;
			while (!(sbus_serving_map & (1 << i)))
				i--;
			sbus_tid[sbus->board] = i;
			sbus_serving_map &= ~(1 << i);
		}
	for_each_sbus(sbus) {
		printk("sbus%d IRQs directed to CPU%d\n", sbus->board, sbus_tid[sbus->board]);
		set_sbi_tid(sbus->devid, sbus_tid[sbus->board] << 3);
	}
#else
	struct sbus_bus *sbus;
	int cpuid = cpu_logical_map(1);

	if (cpuid == -1)
		cpuid = cpu_logical_map(0);
	for_each_sbus(sbus) {
		sbus_tid[sbus->board] = cpuid;
		set_sbi_tid(sbus->devid, cpuid << 3);
	}
	printk("All sbus IRQs directed to CPU%d\n", cpuid);
#endif
}
#endif

static void sun4d_clear_clock_irq(void)
{
	volatile unsigned int clear_intr;
	clear_intr = sun4d_timers->l10_timer_limit;
}

static void sun4d_clear_profile_irq(int cpu)
{
	bw_get_prof_limit(cpu);
}

static void sun4d_load_profile_irq(int cpu, unsigned int limit)
{
	bw_set_prof_limit(cpu, limit);
}

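/*
 * Maps the boot CPU's level-10 timer registers, programs the limit
 * register for HZ ticks, and installs counter_fn on TIMER_IRQ as a
 * statically allocated fast handler.  Under SMP the boot CPU's level-14
 * trap table slot is rewritten to point at the SMP ticker
 * (smp4d_ticker), since the bootup code left the firmware vector there.
 */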
static void __init sun4d_init_timers(void (*counter_fn)(int, void *, struct pt_regs *))
{
	int irq;
	extern struct prom_cpuinfo linux_cpus[NR_CPUS];
	int cpu;
	struct resource r;

	/* Map the User Timer registers. */
	memset(&r, 0, sizeof(r));
#ifdef CONFIG_SMP
	r.start = CSR_BASE(boot_cpu_id) + BW_TIMER_LIMIT;
#else
	r.start = CSR_BASE(0) + BW_TIMER_LIMIT;
#endif
	r.flags = 0xf;
	sun4d_timers = (struct sun4d_timer_regs *) sbus_ioremap(&r, 0,
					PAGE_SIZE, "user timer");

	sun4d_timers->l10_timer_limit = (((1000000/HZ) + 1) << 10);
	master_l10_counter = &sun4d_timers->l10_cur_count;
	master_l10_limit = &sun4d_timers->l10_timer_limit;

	irq = request_irq(TIMER_IRQ,
			  counter_fn,
			  (SA_INTERRUPT | SA_STATIC_ALLOC),
			  "timer", NULL);
	if (irq) {
		prom_printf("time_init: unable to attach IRQ%d\n", TIMER_IRQ);
		prom_halt();
	}

	/* Enable user timer free run for CPU 0 in BW */
	/* bw_set_ctrl(0, bw_get_ctrl(0) | BW_CTRL_USER_TIMER); */

	for (cpu = 0; cpu < linux_num_cpus; cpu++)
		sun4d_load_profile_irq((linux_cpus[cpu].mid >> 3), 0);

#ifdef CONFIG_SMP
	{
		unsigned long flags;
		extern unsigned long lvl14_save[4];
		struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
		extern unsigned int real_irq_entry[], smp4d_ticker[];
		extern unsigned int patchme_maybe_smp_msg[];

		/* Adjust so that we jump directly to smp4d_ticker */
		lvl14_save[2] += smp4d_ticker - real_irq_entry;

		/* For SMP we use the level 14 ticker, however the bootup code
		 * has copied the firmware's level 14 vector into the boot cpu's
		 * trap table; we must fix this now or we get squashed.
		 */
		__save_and_cli(flags);
		patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
		trap_table->inst_one = lvl14_save[0];
		trap_table->inst_two = lvl14_save[1];
		trap_table->inst_three = lvl14_save[2];
		trap_table->inst_four = lvl14_save[3];
		local_flush_cache_all();
		__restore_flags(flags);
	}
#endif
}

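/*
 * Count the SBIs present, allocate the sbus_actions table (8 levels x 4
 * slots per SBI), direct every SBI at the boot CPU when running SMP,
 * and drop any interrupts the PROM left pending.
 */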
void __init sun4d_init_sbi_irq(void)
{
	struct sbus_bus *sbus;
	unsigned mask;

	nsbi = 0;
	for_each_sbus(sbus)
		nsbi++;
	sbus_actions = (struct sbus_action *)kmalloc(nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
	memset(sbus_actions, 0, (nsbi * 8 * 4 * sizeof(struct sbus_action)));
	for_each_sbus(sbus) {
#ifdef CONFIG_SMP
		extern unsigned char boot_cpu_id;

		set_sbi_tid(sbus->devid, boot_cpu_id << 3);
		sbus_tid[sbus->board] = boot_cpu_id;
#endif
		/* Get rid of pending irqs from PROM */
		mask = acquire_sbi(sbus->devid, 0xffffffff);
		if (mask) {
			printk("Clearing pending IRQs %08x on SBI %d\n", mask, sbus->board);
			release_sbi(sbus->devid, mask);
		}
	}
}

static char *sun4d_irq_itoa(unsigned int irq)
{
	static char buff[16];

	if (irq < (1 << 5))
		sprintf(buff, "%d", irq);
	else
		sprintf(buff, "%d,%x", sbus_to_pil[(irq >> 2) & 7], irq);
	return buff;
}

void __init sun4d_init_IRQ(void)
{
	__cli();

	BTFIXUPSET_CALL(sbint_to_irq, sun4d_sbint_to_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_profile_irq, sun4d_clear_profile_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__irq_itoa, sun4d_irq_itoa, BTFIXUPCALL_NORM);
	sparc_init_timers = sun4d_init_timers;
#ifdef CONFIG_SMP
	BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(set_irq_udt, sun4d_set_udt, BTFIXUPCALL_NOP);
#endif
	/* Cannot enable interrupts until OBP ticker is disabled. */
}