1 /* $Id: irq.c,v 1.112 2001/11/16 00:04:54 kanoj Exp $
2 * irq.c: UltraSparc IRQ handling/init/registry.
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9 #include <linux/config.h>
10 #include <linux/ptrace.h>
11 #include <linux/errno.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/signal.h>
14 #include <linux/mm.h>
15 #include <linux/interrupt.h>
16 #include <linux/slab.h>
17 #include <linux/random.h>
18 #include <linux/init.h>
19 #include <linux/delay.h>
20 #include <linux/proc_fs.h>
21 #include <linux/kbd_ll.h>
22
23 #include <asm/ptrace.h>
24 #include <asm/processor.h>
25 #include <asm/atomic.h>
26 #include <asm/system.h>
27 #include <asm/irq.h>
28 #include <asm/sbus.h>
29 #include <asm/iommu.h>
30 #include <asm/upa.h>
31 #include <asm/oplib.h>
32 #include <asm/timer.h>
33 #include <asm/smp.h>
34 #include <asm/hardirq.h>
35 #include <asm/softirq.h>
36 #include <asm/starfire.h>
37 #include <asm/uaccess.h>
38 #include <asm/cache.h>
39
40 #ifdef CONFIG_SMP
41 static void distribute_irqs(void);
42 #endif
43
44 /* UPA nodes send an interrupt packet to the UltraSparc with the low
45  * 5 bits (7 on Starfire) of the first data register holding the IRQ
46  * identifier being delivered.  We must translate this into a non-vector
47  * IRQ so we can set the softint on this cpu.
48 *
49 * To make processing these packets efficient and race free we use
50 * an array of irq buckets below. The interrupt vector handler in
51 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
52 * The IVEC handler does not need to act atomically, the PIL dispatch
53 * code uses CAS to get an atomic snapshot of the list and clear it
54 * at the same time.
55 */
56
57 struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
58
59 #ifndef CONFIG_SMP
60 unsigned int __up_workvec[16] __attribute__ ((aligned (SMP_CACHE_BYTES)));
61 #define irq_work(__cpu, __pil) &(__up_workvec[(void)(__cpu), (__pil)])
62 #else
63 #define irq_work(__cpu, __pil) &(cpu_data[(__cpu)].irq_worklists[(__pil)])
64 #endif
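/* Illustrative sketch (not compiled): how the per-cpu worklists above are
 * consumed.  The IVEC handler in entry.S prepends incoming buckets onto
 * irq_work(cpu, pil); the PIL dispatch code then atomically snapshots and
 * clears the list head, roughly as handler_irq() does further below.  The
 * function name here is hypothetical.
 */
#if 0
static void example_drain_worklist(int cpu, int pil)
{
	/* Atomically grab the whole chain, leaving an empty list behind. */
	struct ino_bucket *bp = __bucket(xchg32(irq_work(cpu, pil), 0));

	while (bp != NULL) {
		struct ino_bucket *next = __bucket(bp->irq_chain);

		bp->irq_chain = 0;
		/* ... run the handlers hanging off bp->irq_info here ... */
		bp = next;
	}
}
#endif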
65
66 #ifdef CONFIG_PCI
67 /* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
68 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
69 * for devices behind busses other than APB on Sabre systems.
70 *
71 * Currently these physical addresses are just config space accesses
72 * to the command register for that device.
73 */
74 unsigned long pci_dma_wsync;
75 unsigned long dma_sync_reg_table[256];
76 unsigned char dma_sync_reg_table_entry = 0;
77 #endif
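/* Illustrative note: a bucket flagged IBF_DMA_SYNC carries an index
 * (synctab_ent) into dma_sync_reg_table[]; handler_irq() below performs
 * upa_readl() on that config-space address and then upa_readq() on
 * pci_dma_wsync before running the handler, forcing in-flight DMA to
 * complete ahead of the interrupt handler.
 */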
78
79 /* This is based upon code in the 32-bit Sparc kernel written mostly by
80 * David Redman (djhr@tadpole.co.uk).
81 */
82 #define MAX_STATIC_ALLOC 4
83 static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
84 static int static_irq_count;
85
86 /* This is exported so that fast IRQ handlers can get at it... -DaveM */
87 struct irqaction *irq_action[NR_IRQS+1] = {
88 NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
89 NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
90 };
91
92 static void register_irq_proc (unsigned int irq);
93
94 /*
95  * The upper 16 bits of irqaction->flags hold the ino.
96 * irqaction->mask holds the smp affinity information.
97 */
98 #define put_ino_in_irqaction(action, irq) \
99 action->flags &= 0xffffffffffffUL; \
100 if (__bucket(irq) == &pil0_dummy_bucket) \
101 action->flags |= 0xdeadUL << 48; \
102 else \
103 action->flags |= __irq_ino(irq) << 48;
104 #define get_ino_in_irqaction(action) (action->flags >> 48)
105
106 #define put_smpaff_in_irqaction(action, smpaff) (action)->mask = (smpaff)
107 #define get_smpaff_in_irqaction(action) ((action)->mask)
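/* Worked example of the packing above (illustrative): for an irqaction
 * attached to INO 0x1c, put_ino_in_irqaction() keeps the low 48 bits of
 * ->flags (the SA_* bits) and ORs in 0x1c << 48, so that
 * get_ino_in_irqaction() recovers 0x1c with a shift right by 48.  The
 * dummy PIL0 bucket stores the marker value 0xdead instead of an ino.
 */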
108
109 int get_irq_list(char *buf)
110 {
111 int i, len = 0;
112 struct irqaction *action;
113 #ifdef CONFIG_SMP
114 int j;
115 #endif
116
117 for(i = 0; i < (NR_IRQS + 1); i++) {
118 if(!(action = *(i + irq_action)))
119 continue;
120 len += sprintf(buf + len, "%3d: ", i);
121 #ifndef CONFIG_SMP
122 len += sprintf(buf + len, "%10u ", kstat_irqs(i));
123 #else
124 for (j = 0; j < smp_num_cpus; j++)
125 len += sprintf(buf + len, "%10u ",
126 kstat.irqs[cpu_logical_map(j)][i]);
127 #endif
128 len += sprintf(buf + len, " %s:%lx", action->name, \
129 get_ino_in_irqaction(action));
130 for(action = action->next; action; action = action->next) {
131 len += sprintf(buf+len, ", %s:%lx", action->name, \
132 get_ino_in_irqaction(action));
133 }
134 len += sprintf(buf + len, "\n");
135 }
136 return len;
137 }
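/* For reference, each line emitted above has the form (illustrative):
 *
 *	<pil>: <per-cpu count(s)>  <name>:<ino in hex>[, <name>:<ino in hex>]...
 *
 * matching the sprintf() formats used in the loop.
 */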
138
139 /* Now these are always passed a true fully specified sun4u INO. */
140 void enable_irq(unsigned int irq)
141 {
142 struct ino_bucket *bucket = __bucket(irq);
143 unsigned long imap;
144 unsigned long tid;
145
146 imap = bucket->imap;
147 if (imap == 0UL)
148 return;
149
150 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
151 unsigned long ver;
152
153 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
154 if ((ver >> 32) == 0x003e0016) {
155 /* We set it to our JBUS ID. */
156 __asm__ __volatile__("ldxa [%%g0] %1, %0"
157 : "=r" (tid)
158 : "i" (ASI_JBUS_CONFIG));
159 tid = ((tid & (0x1fUL<<17)) << 9);
160 tid &= IMAP_TID_JBUS;
161 } else {
162 /* We set it to our Safari AID. */
163 __asm__ __volatile__("ldxa [%%g0] %1, %0"
164 : "=r" (tid)
165 : "i" (ASI_SAFARI_CONFIG));
166 tid = ((tid & (0x3ffUL<<17)) << 9);
167 tid &= IMAP_AID_SAFARI;
168 }
169 } else if (this_is_starfire == 0) {
170 /* We set it to our UPA MID. */
171 __asm__ __volatile__("ldxa [%%g0] %1, %0"
172 : "=r" (tid)
173 : "i" (ASI_UPA_CONFIG));
174 tid = ((tid & UPA_CONFIG_MID) << 9);
175 tid &= IMAP_TID_UPA;
176 } else {
177 tid = (starfire_translate(imap, current->processor) << 26);
178 tid &= IMAP_TID_UPA;
179 }
180
181 	/* NOTE NOTE NOTE: IGN and INO are read-only; IGN is a product
182 	 * of this SYSIO's preconfigured IGN in the SYSIO Control
183 	 * Register, and the hardware just mirrors that value here.
184 	 * However, for Graphics and UPA Slave devices the full
185 	 * IMAP_INR field can be set by the programmer here.
186 *
187 * Things like FFB can now be handled via the new IRQ mechanism.
188 */
189 upa_writel(tid | IMAP_VALID, imap);
190 }
191
192 /* This now gets passed true ino's as well. */
193 void disable_irq(unsigned int irq)
194 {
195 struct ino_bucket *bucket = __bucket(irq);
196 unsigned long imap;
197
198 imap = bucket->imap;
199 if (imap != 0UL) {
200 u32 tmp;
201
202 		/* NOTE: We do not want to futz with the IRQ clear registers
203 		 * and move the state to IDLE; the SCSI code calls disable_irq()
204 		 * to ensure atomicity in its queue-command path, so doing
205 		 * that here would lose interrupts.
206 */
207 tmp = upa_readl(imap);
208 tmp &= ~IMAP_VALID;
209 upa_writel(tmp, imap);
210 }
211 }
212
213 /* The timer is the one "weird" interrupt which is generated by
214 * the CPU %tick register and not by some normal vectored interrupt
215 * source. To handle this special case, we use this dummy INO bucket.
216 */
217 static struct ino_bucket pil0_dummy_bucket = {
218 0, /* irq_chain */
219 0, /* pil */
220 0, /* pending */
221 0, /* flags */
222 0, /* __unused */
223 NULL, /* irq_info */
224 0UL, /* iclr */
225 0UL, /* imap */
226 };
227
228 unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
229 {
230 struct ino_bucket *bucket;
231 int ino;
232
233 if(pil == 0) {
234 if(iclr != 0UL || imap != 0UL) {
235 prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
236 iclr, imap);
237 prom_halt();
238 }
239 return __irq(&pil0_dummy_bucket);
240 }
241
242 /* RULE: Both must be specified in all other cases. */
243 if (iclr == 0UL || imap == 0UL) {
244 prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
245 pil, inofixup, iclr, imap);
246 prom_halt();
247 }
248
249 ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
250 	if(ino >= NUM_IVECS) {
251 prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
252 ino, pil, inofixup, iclr, imap);
253 prom_halt();
254 }
255
256 /* Ok, looks good, set it up. Don't touch the irq_chain or
257 * the pending flag.
258 */
259 bucket = &ivector_table[ino];
260 if ((bucket->flags & IBF_ACTIVE) ||
261 (bucket->irq_info != NULL)) {
262 /* This is a gross fatal error if it happens here. */
263 prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
264 prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
265 ino, pil, inofixup, iclr, imap);
266 prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
267 bucket->pil, bucket->iclr, bucket->imap);
268 prom_printf("IRQ: Cannot continue, halting...\n");
269 prom_halt();
270 }
271 bucket->imap = imap;
272 bucket->iclr = iclr;
273 bucket->pil = pil;
274 bucket->flags = 0;
275
276 bucket->irq_info = NULL;
277
278 return __irq(bucket);
279 }
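/* Illustrative sketch (not compiled): how a bus controller driver is
 * expected to combine build_irq() with request_irq().  The pil/inofixup
 * values, register addresses and the "mydev" name below are hypothetical;
 * the real callers are the SBUS/PCI controller code, which derive
 * iclr/imap from their interrupt map registers.
 */
#if 0
static int example_attach(unsigned long iclr, unsigned long imap,
			  void (*handler)(int, void *, struct pt_regs *),
			  void *dev)
{
	unsigned int cookie;

	/* Translate the hardware registers into an IRQ cookie... */
	cookie = build_irq(4 /* pil */, 0 /* inofixup */, iclr, imap);

	/* ...and hand that cookie to request_irq() as usual. */
	return request_irq(cookie, handler, SA_SHIRQ, "mydev", dev);
}
#endif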
280
281 static void atomic_bucket_insert(struct ino_bucket *bucket)
282 {
283 unsigned long pstate;
284 unsigned int *ent;
285
286 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
287 __asm__ __volatile__("wrpr %0, %1, %%pstate"
288 : : "r" (pstate), "i" (PSTATE_IE));
289 ent = irq_work(smp_processor_id(), bucket->pil);
290 bucket->irq_chain = *ent;
291 *ent = __irq(bucket);
292 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
293 }
294
295 int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
296 unsigned long irqflags, const char *name, void *dev_id)
297 {
298 struct irqaction *action, *tmp = NULL;
299 struct ino_bucket *bucket = __bucket(irq);
300 unsigned long flags;
301 int pending = 0;
302
303 if ((bucket != &pil0_dummy_bucket) &&
304 (bucket < &ivector_table[0] ||
305 bucket >= &ivector_table[NUM_IVECS])) {
306 unsigned int *caller;
307
308 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
309 printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
310 "from %p, irq %08x.\n", caller, irq);
311 return -EINVAL;
312 }
313 if(!handler)
314 return -EINVAL;
315
316 if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
317 /*
318 		 * This function might sleep, so we want to call it first,
319 		 * outside of the atomic block.  In the SA_STATIC_ALLOC case
320 		 * the random driver's kmalloc will fail, but that is safe.
321 		 * If already initialized, the random driver will not reinit.
322 		 * Yes, this might clear the entropy pool if the wrong driver
323 		 * is loaded without actually installing a new handler, but
324 		 * is that really a problem?  Only the sysadmin is able to
325 		 * do this.
326 */
327 rand_initialize_irq(irq);
328 }
329
330 save_and_cli(flags);
331
332 action = *(bucket->pil + irq_action);
333 if(action) {
334 if((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
335 for (tmp = action; tmp->next; tmp = tmp->next)
336 ;
337 else {
338 restore_flags(flags);
339 return -EBUSY;
340 }
341 action = NULL; /* Or else! */
342 }
343
344 /* If this is flagged as statically allocated then we use our
345 * private struct which is never freed.
346 */
347 if(irqflags & SA_STATIC_ALLOC) {
348 if(static_irq_count < MAX_STATIC_ALLOC)
349 action = &static_irqaction[static_irq_count++];
350 else
351 			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed, "
352 			       "using kmalloc\n", irq, name);
353 }
354 if(action == NULL)
355 action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
356 GFP_ATOMIC);
357
358 if(!action) {
359 restore_flags(flags);
360 return -ENOMEM;
361 }
362
363 if (bucket == &pil0_dummy_bucket) {
364 bucket->irq_info = action;
365 bucket->flags |= IBF_ACTIVE;
366 } else {
367 if((bucket->flags & IBF_ACTIVE) != 0) {
368 void *orig = bucket->irq_info;
369 void **vector = NULL;
370
371 if((bucket->flags & IBF_PCI) == 0) {
372 printk("IRQ: Trying to share non-PCI bucket.\n");
373 goto free_and_ebusy;
374 }
375 if((bucket->flags & IBF_MULTI) == 0) {
376 vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
377 if(vector == NULL)
378 goto free_and_enomem;
379
380 /* We might have slept. */
381 if ((bucket->flags & IBF_MULTI) != 0) {
382 int ent;
383
384 kfree(vector);
385 vector = (void **)bucket->irq_info;
386 for(ent = 0; ent < 4; ent++) {
387 if (vector[ent] == NULL) {
388 vector[ent] = action;
389 break;
390 }
391 }
392 if (ent == 4)
393 goto free_and_ebusy;
394 } else {
395 vector[0] = orig;
396 vector[1] = action;
397 vector[2] = NULL;
398 vector[3] = NULL;
399 bucket->irq_info = vector;
400 bucket->flags |= IBF_MULTI;
401 }
402 } else {
403 int ent;
404
405 vector = (void **)orig;
406 for(ent = 0; ent < 4; ent++) {
407 if(vector[ent] == NULL) {
408 vector[ent] = action;
409 break;
410 }
411 }
412 if (ent == 4)
413 goto free_and_ebusy;
414 }
415 } else {
416 bucket->irq_info = action;
417 bucket->flags |= IBF_ACTIVE;
418 }
419 pending = bucket->pending;
420 if(pending)
421 bucket->pending = 0;
422 }
423
424 action->handler = handler;
425 action->flags = irqflags;
426 action->name = name;
427 action->next = NULL;
428 action->dev_id = dev_id;
429 put_ino_in_irqaction(action, irq);
430 put_smpaff_in_irqaction(action, 0);
431
432 if(tmp)
433 tmp->next = action;
434 else
435 *(bucket->pil + irq_action) = action;
436
437 enable_irq(irq);
438
439 /* We ate the IVEC already, this makes sure it does not get lost. */
440 if(pending) {
441 atomic_bucket_insert(bucket);
442 set_softint(1 << bucket->pil);
443 }
444 restore_flags(flags);
445 if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
446 register_irq_proc(__irq_ino(irq));
447
448 #ifdef CONFIG_SMP
449 distribute_irqs();
450 #endif
451 return 0;
452
453 free_and_ebusy:
454 kfree(action);
455 restore_flags(flags);
456 return -EBUSY;
457
458 free_and_enomem:
459 kfree(action);
460 restore_flags(flags);
461 return -ENOMEM;
462 }
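/* Note on the sharing scheme above (illustrative): a PCI bucket starts out
 * with ->irq_info pointing at a single irqaction.  The first SA_SHIRQ
 * sharer converts that into a 4-entry vector of irqaction pointers and
 * sets IBF_MULTI; later sharers (up to four in total) fill the remaining
 * slots, and free_irq() below converts back to a single action once only
 * one sharer remains.
 */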
463
464 void free_irq(unsigned int irq, void *dev_id)
465 {
466 struct irqaction *action;
467 struct irqaction *tmp = NULL;
468 unsigned long flags;
469 struct ino_bucket *bucket = __bucket(irq), *bp;
470
471 if ((bucket != &pil0_dummy_bucket) &&
472 (bucket < &ivector_table[0] ||
473 bucket >= &ivector_table[NUM_IVECS])) {
474 unsigned int *caller;
475
476 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
477 printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
478 "from %p, irq %08x.\n", caller, irq);
479 return;
480 }
481
482 action = *(bucket->pil + irq_action);
483 	if(!action || !action->handler) {
484 printk("Freeing free IRQ %d\n", bucket->pil);
485 return;
486 }
487 if(dev_id) {
488 for( ; action; action = action->next) {
489 if(action->dev_id == dev_id)
490 break;
491 tmp = action;
492 }
493 if(!action) {
494 printk("Trying to free free shared IRQ %d\n", bucket->pil);
495 return;
496 }
497 } else if(action->flags & SA_SHIRQ) {
498 printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
499 return;
500 }
501
502 if(action->flags & SA_STATIC_ALLOC) {
503 printk("Attempt to free statically allocated IRQ %d (%s)\n",
504 bucket->pil, action->name);
505 return;
506 }
507
508 save_and_cli(flags);
509 if(action && tmp)
510 tmp->next = action->next;
511 else
512 *(bucket->pil + irq_action) = action->next;
513
514 if (bucket != &pil0_dummy_bucket) {
515 unsigned long imap = bucket->imap;
516 void **vector, *orig;
517 int ent;
518
519 orig = bucket->irq_info;
520 vector = (void **)orig;
521
522 if ((bucket->flags & IBF_MULTI) != 0) {
523 int other = 0;
524 void *orphan = NULL;
525 for(ent = 0; ent < 4; ent++) {
526 if(vector[ent] == action)
527 vector[ent] = NULL;
528 else if(vector[ent] != NULL) {
529 orphan = vector[ent];
530 other++;
531 }
532 }
533
534 /* Only free when no other shared irq
535 * uses this bucket.
536 */
537 if(other) {
538 if (other == 1) {
539 /* Convert back to non-shared bucket. */
540 bucket->irq_info = orphan;
541 bucket->flags &= ~(IBF_MULTI);
542 kfree(vector);
543 }
544 goto out;
545 }
546 } else {
547 bucket->irq_info = NULL;
548 }
549
550 /* This unique interrupt source is now inactive. */
551 bucket->flags &= ~IBF_ACTIVE;
552
553 /* See if any other buckets share this bucket's IMAP
554 * and are still active.
555 */
556 for(ent = 0; ent < NUM_IVECS; ent++) {
557 bp = &ivector_table[ent];
558 if(bp != bucket &&
559 bp->imap == imap &&
560 (bp->flags & IBF_ACTIVE) != 0)
561 break;
562 }
563
564 /* Only disable when no other sub-irq levels of
565 * the same IMAP are active.
566 */
567 if (ent == NUM_IVECS)
568 disable_irq(irq);
569 }
570
571 out:
572 kfree(action);
573 restore_flags(flags);
574 }
575
576 #ifdef CONFIG_SMP
577
578 /* Who has the global irq brlock */
579 unsigned char global_irq_holder = NO_PROC_ID;
580
581 static void show(char * str)
582 {
583 int cpu = smp_processor_id();
584 int i;
585
586 printk("\n%s, CPU %d:\n", str, cpu);
587 printk("irq: %d [ ", irqs_running());
588 for (i = 0; i < smp_num_cpus; i++)
589 printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
590 printk("]\nbh: %d [ ",
591 (spin_is_locked(&global_bh_lock) ? 1 : 0));
592 for (i = 0; i < smp_num_cpus; i++)
593 printk("%u ", local_bh_count(i));
594 printk("]\n");
595 }
596
597 #define MAXCOUNT 100000000
598
599 #if 0
600 #define SYNC_OTHER_ULTRAS(x) udelay(x+1)
601 #else
602 #define SYNC_OTHER_ULTRAS(x) membar_safe("#Sync");
603 #endif
604
605 void synchronize_irq(void)
606 {
607 if (irqs_running()) {
608 cli();
609 sti();
610 }
611 }
612
613 static inline void get_irqlock(int cpu)
614 {
615 int count;
616
617 if ((unsigned char)cpu == global_irq_holder)
618 return;
619
620 count = MAXCOUNT;
621 again:
622 br_write_lock(BR_GLOBALIRQ_LOCK);
623 for (;;) {
624 spinlock_t *lock;
625
626 if (!irqs_running() &&
627 (local_bh_count(smp_processor_id()) || !spin_is_locked(&global_bh_lock)))
628 break;
629
630 br_write_unlock(BR_GLOBALIRQ_LOCK);
631 lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
632 while (irqs_running() ||
633 spin_is_locked(lock) ||
634 (!local_bh_count(smp_processor_id()) && spin_is_locked(&global_bh_lock))) {
635 if (!--count) {
636 show("get_irqlock");
637 count = (~0 >> 1);
638 }
639 __sti();
640 SYNC_OTHER_ULTRAS(cpu);
641 __cli();
642 }
643 goto again;
644 }
645
646 global_irq_holder = cpu;
647 }
648
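/* The flags value produced by __global_save_flags() below is a small
 * integer which __global_restore_flags() maps back as follows:
 *
 *	0 - this cpu held the global irq lock	-> restore via __global_cli()
 *	1 - local irqs enabled, no lock held	-> restore via __global_sti()
 *	2 - local irqs disabled			-> restore via __cli()
 *	3 - local irqs enabled, in irq context	-> restore via __sti()
 */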
649 void __global_cli(void)
650 {
651 unsigned long flags;
652
653 __save_flags(flags);
654 if(flags == 0) {
655 int cpu = smp_processor_id();
656 __cli();
657 if (! local_irq_count(cpu))
658 get_irqlock(cpu);
659 }
660 }
661
662 void __global_sti(void)
663 {
664 int cpu = smp_processor_id();
665
666 if (! local_irq_count(cpu))
667 release_irqlock(cpu);
668 __sti();
669 }
670
671 unsigned long __global_save_flags(void)
672 {
673 unsigned long flags, local_enabled, retval;
674
675 __save_flags(flags);
676 local_enabled = ((flags == 0) ? 1 : 0);
677 retval = 2 + local_enabled;
678 if (! local_irq_count(smp_processor_id())) {
679 if (local_enabled)
680 retval = 1;
681 if (global_irq_holder == (unsigned char) smp_processor_id())
682 retval = 0;
683 }
684 return retval;
685 }
686
687 void __global_restore_flags(unsigned long flags)
688 {
689 switch (flags) {
690 case 0:
691 __global_cli();
692 break;
693 case 1:
694 __global_sti();
695 break;
696 case 2:
697 __cli();
698 break;
699 case 3:
700 __sti();
701 break;
702 default:
703 {
704 unsigned long pc;
705 __asm__ __volatile__("mov %%i7, %0" : "=r" (pc));
706 printk("global_restore_flags: Bogon flags(%016lx) caller %016lx\n",
707 flags, pc);
708 }
709 }
710 }
711
712 #endif /* CONFIG_SMP */
713
714 void catch_disabled_ivec(struct pt_regs *regs)
715 {
716 int cpu = smp_processor_id();
717 struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));
718
719 	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
720 	 * to other devices.  Here a single IMAP enables potentially multiple
721 	 * unique interrupt sources (each of which has its own ICLR register).
722 	 *
723 	 * So we just record that the IVEC arrived; when the IRQ is registered
724 	 * for real, the request_irq() code will check the pending bit and
725 	 * signal a local CPU interrupt for it.
726 */
727 #if 0
728 printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
729 bucket - &ivector_table[0], regs->tpc);
730 #endif
731 *irq_work(cpu, 0) = 0;
732 bucket->pending = 1;
733 }
734
735 /* Tune this... */
736 #define FORWARD_VOLUME 12
737
738 #ifdef CONFIG_SMP
739
740 static inline void redirect_intr(int cpu, struct ino_bucket *bp)
741 {
742 /* Ok, here is what is going on:
743 * 1) Retargeting IRQs on Starfire is very
744 * expensive so just forget about it on them.
745 * 2) Moving around very high priority interrupts
746 * is a losing game.
747 * 3) If the current cpu is idle, interrupts are
748 * useful work, so keep them here. But do not
749 	 *    pass them to our neighbour unless it is very idle.
750 * 4) If sysadmin explicitly asks for directed intrs,
751 * Just Do It.
752 */
753 struct irqaction *ap = bp->irq_info;
754 unsigned long cpu_mask = get_smpaff_in_irqaction(ap);
755 unsigned int buddy, ticks;
756
757 if (cpu_mask == 0)
758 cpu_mask = ~0UL;
759
760 if (this_is_starfire != 0 ||
761 bp->pil >= 10 || current->pid == 0)
762 goto out;
763
764 	/* 'cpu' is the MID (i.e. the UPA ID); calculate the MID
765 * of our buddy.
766 */
767 buddy = cpu_number_map(cpu) + 1;
768 if (buddy >= NR_CPUS ||
769 cpu_logical_map(buddy) == -1)
770 buddy = 0;
771
772 ticks = 0;
773 while ((cpu_mask & (1UL << buddy)) == 0) {
774 buddy++;
775 if (buddy >= NR_CPUS ||
776 cpu_logical_map(buddy) == -1)
777 buddy = cpu_logical_map(0);
778 if (++ticks > NR_CPUS) {
779 put_smpaff_in_irqaction(ap, 0);
780 goto out;
781 }
782 }
783
784 if (buddy == cpu_number_map(cpu))
785 goto out;
786
787 buddy = cpu_logical_map(buddy);
788
789 	/* Voodoo programming. */
790 if (cpu_data[buddy].idle_volume < FORWARD_VOLUME)
791 goto out;
792
793 /* This just so happens to be correct on Cheetah
794 * at the moment.
795 */
796 buddy <<= 26;
797
798 /* Push it to our buddy. */
799 upa_writel(buddy | IMAP_VALID, bp->imap);
800
801 out:
802 return;
803 }
804
805 #endif
806
807 void handler_irq(int irq, struct pt_regs *regs)
808 {
809 struct ino_bucket *bp, *nbp;
810 int cpu = smp_processor_id();
811
812 #ifndef CONFIG_SMP
813 /*
814 * Check for TICK_INT on level 14 softint.
815 */
816 {
817 unsigned long clr_mask = 1 << irq;
818 unsigned long tick_mask = tick_ops->softint_mask;
819
820 if ((irq == 14) && (get_softint() & tick_mask)) {
821 irq = 0;
822 clr_mask = tick_mask;
823 }
824 clear_softint(clr_mask);
825 }
826 #else
827 int should_forward = 1;
828
829 clear_softint(1 << irq);
830 #endif
831
832 irq_enter(cpu, irq);
833 kstat.irqs[cpu][irq]++;
834
835 #ifdef CONFIG_PCI
836 if (irq == 9)
837 kbd_pt_regs = regs;
838 #endif
839
840 /* Sliiiick... */
841 #ifndef CONFIG_SMP
842 bp = ((irq != 0) ?
843 __bucket(xchg32(irq_work(cpu, irq), 0)) :
844 &pil0_dummy_bucket);
845 #else
846 bp = __bucket(xchg32(irq_work(cpu, irq), 0));
847 #endif
848 for ( ; bp != NULL; bp = nbp) {
849 unsigned char flags = bp->flags;
850 unsigned char random = 0;
851
852 nbp = __bucket(bp->irq_chain);
853 bp->irq_chain = 0;
854
855 if ((flags & IBF_ACTIVE) != 0) {
856 #ifdef CONFIG_PCI
857 if ((flags & IBF_DMA_SYNC) != 0) {
858 upa_readl(dma_sync_reg_table[bp->synctab_ent]);
859 upa_readq(pci_dma_wsync);
860 }
861 #endif
862 if ((flags & IBF_MULTI) == 0) {
863 struct irqaction *ap = bp->irq_info;
864 ap->handler(__irq(bp), ap->dev_id, regs);
865 random |= ap->flags & SA_SAMPLE_RANDOM;
866 } else {
867 void **vector = (void **)bp->irq_info;
868 int ent;
869 for (ent = 0; ent < 4; ent++) {
870 struct irqaction *ap = vector[ent];
871 if (ap != NULL) {
872 ap->handler(__irq(bp), ap->dev_id, regs);
873 random |= ap->flags & SA_SAMPLE_RANDOM;
874 }
875 }
876 }
877 /* Only the dummy bucket lacks IMAP/ICLR. */
878 if (bp->pil != 0) {
879 #ifdef CONFIG_SMP
880 if (should_forward) {
881 redirect_intr(cpu, bp);
882 should_forward = 0;
883 }
884 #endif
885 upa_writel(ICLR_IDLE, bp->iclr);
886 /* Test and add entropy */
887 if (random)
888 add_interrupt_randomness(irq);
889 }
890 } else
891 bp->pending = 1;
892 }
893 irq_exit(cpu, irq);
894 }
895
896 #ifdef CONFIG_BLK_DEV_FD
897 extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);
898
899 void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
900 {
901 struct irqaction *action = *(irq + irq_action);
902 struct ino_bucket *bucket;
903 int cpu = smp_processor_id();
904
905 irq_enter(cpu, irq);
906 kstat.irqs[cpu][irq]++;
907
908 *(irq_work(cpu, irq)) = 0;
909 bucket = get_ino_in_irqaction(action) + ivector_table;
910
911 floppy_interrupt(irq, dev_cookie, regs);
912 upa_writel(ICLR_IDLE, bucket->iclr);
913
914 irq_exit(cpu, irq);
915 }
916 #endif
917
918 /* The following assumes that the branch lies before the place we
919 * are branching to. This is the case for a trap vector...
920 * You have been warned.
921 */
922 #define SPARC_BRANCH(dest_addr, inst_addr) \
923 (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))
924
925 #define SPARC_NOP (0x01000000)
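/* Worked example (illustrative): install_fast_irq() below patches a trap
 * table entry with SPARC_BRANCH(handler, entry).  For a handler sitting
 * 0x40 bytes past the patched instruction, the word displacement is
 * 0x40 >> 2 = 0x10, so the encoded instruction is 0x10800000 | 0x10 =
 * 0x10800010, i.e. a "ba" with a 22-bit word displacement, followed by
 * SPARC_NOP in the delay slot.
 */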
926
927 static void install_fast_irq(unsigned int cpu_irq,
928 void (*handler)(int, void *, struct pt_regs *))
929 {
930 extern unsigned long sparc64_ttable_tl0;
931 unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
932 unsigned int *insns;
933
934 ttent += 0x820;
935 ttent += (cpu_irq - 1) << 5;
936 insns = (unsigned int *) ttent;
937 insns[0] = SPARC_BRANCH(((unsigned long) handler),
938 ((unsigned long)&insns[0]));
939 insns[1] = SPARC_NOP;
940 __asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
941 }
942
943 int request_fast_irq(unsigned int irq,
944 void (*handler)(int, void *, struct pt_regs *),
945 unsigned long irqflags, const char *name, void *dev_id)
946 {
947 struct irqaction *action;
948 struct ino_bucket *bucket = __bucket(irq);
949 unsigned long flags;
950
951 /* No pil0 dummy buckets allowed here. */
952 if (bucket < &ivector_table[0] ||
953 bucket >= &ivector_table[NUM_IVECS]) {
954 unsigned int *caller;
955
956 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
957 printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
958 "from %p, irq %08x.\n", caller, irq);
959 return -EINVAL;
960 }
961
962 if(!handler)
963 return -EINVAL;
964
965 if ((bucket->pil == 0) || (bucket->pil == 14)) {
966 		printk("request_fast_irq: Trying to register fast IRQ on PIL 0 or 14.\n");
967 return -EBUSY;
968 }
969
970 action = *(bucket->pil + irq_action);
971 if(action) {
972 if(action->flags & SA_SHIRQ)
973 panic("Trying to register fast irq when already shared.\n");
974 if(irqflags & SA_SHIRQ)
975 panic("Trying to register fast irq as shared.\n");
976 printk("request_fast_irq: Trying to register yet already owned.\n");
977 return -EBUSY;
978 }
979
980 /*
981 * We do not check for SA_SAMPLE_RANDOM in this path. Neither do we
982 * support smp intr affinity in this path.
983 */
984 save_and_cli(flags);
985 if(irqflags & SA_STATIC_ALLOC) {
986 if(static_irq_count < MAX_STATIC_ALLOC)
987 action = &static_irqaction[static_irq_count++];
988 else
989 			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed, "
990 			       "using kmalloc\n", bucket->pil, name);
991 }
992 if(action == NULL)
993 action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
994 GFP_ATOMIC);
995 if(!action) {
996 restore_flags(flags);
997 return -ENOMEM;
998 }
999 install_fast_irq(bucket->pil, handler);
1000
1001 bucket->irq_info = action;
1002 bucket->flags |= IBF_ACTIVE;
1003
1004 action->handler = handler;
1005 action->flags = irqflags;
1006 action->dev_id = NULL;
1007 action->name = name;
1008 action->next = NULL;
1009 put_ino_in_irqaction(action, irq);
1010 put_smpaff_in_irqaction(action, 0);
1011
1012 *(bucket->pil + irq_action) = action;
1013 enable_irq(irq);
1014
1015 restore_flags(flags);
1016
1017 #ifdef CONFIG_SMP
1018 distribute_irqs();
1019 #endif
1020 return 0;
1021 }
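/* Note (illustrative): a "fast" irq differs from a normal one in that
 * install_fast_irq() patches a branch to the handler directly into the
 * TL0 trap table slot for that PIL, so the interrupt bypasses the normal
 * handler_irq() dispatch; hence the restrictions above on sharing,
 * PIL 0/14, entropy sampling and smp affinity.
 */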
1022
1023 /* We really don't need these at all on the Sparc. We only have
1024 * stubs here because they are exported to modules.
1025 */
1026 unsigned long probe_irq_on(void)
1027 {
1028 return 0;
1029 }
1030
1031 int probe_irq_off(unsigned long mask)
1032 {
1033 return 0;
1034 }
1035
1036 #ifdef CONFIG_SMP
1037 static int retarget_one_irq(struct irqaction *p, int goal_cpu)
1038 {
1039 struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
1040 unsigned long imap = bucket->imap;
1041 unsigned int tid;
1042
1043 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1044 tid = __cpu_logical_map[goal_cpu] << 26;
1045 tid &= IMAP_AID_SAFARI;
1046 } else if (this_is_starfire == 0) {
1047 tid = __cpu_logical_map[goal_cpu] << 26;
1048 tid &= IMAP_TID_UPA;
1049 } else {
1050 tid = (starfire_translate(imap, __cpu_logical_map[goal_cpu]) << 26);
1051 tid &= IMAP_TID_UPA;
1052 }
1053 upa_writel(tid | IMAP_VALID, imap);
1054
1055 goal_cpu++;
1056 if(goal_cpu >= NR_CPUS ||
1057 __cpu_logical_map[goal_cpu] == -1)
1058 goal_cpu = 0;
1059 return goal_cpu;
1060 }
1061
1062 /* Called from request_irq() and request_fast_irq(). */
1063 static void distribute_irqs(void)
1064 {
1065 unsigned long flags;
1066 int cpu, level;
1067
1068 save_and_cli(flags);
1069 cpu = 0;
1070
1071 /*
1072 * Skip the timer at [0], and very rare error/power intrs at [15].
1073 * Also level [12], it causes problems on Ex000 systems.
1074 */
1075 for(level = 1; level < NR_IRQS; level++) {
1076 struct irqaction *p = irq_action[level];
1077 if (level == 12) continue;
1078 while(p) {
1079 cpu = retarget_one_irq(p, cpu);
1080 p = p->next;
1081 }
1082 }
1083 restore_flags(flags);
1084 }
1085 #endif
1086
1087
1088 struct sun5_timer *prom_timers;
1089 static u64 prom_limit0, prom_limit1;
1090
1091 static void map_prom_timers(void)
1092 {
1093 unsigned int addr[3];
1094 int tnode, err;
1095
1096 /* PROM timer node hangs out in the top level of device siblings... */
1097 tnode = prom_finddevice("/counter-timer");
1098
1099 	/* Assume that if the node is not present, the PROM uses a different
1100 	 * tick mechanism which we need not care about.
1101 */
1102 if(tnode == 0 || tnode == -1) {
1103 prom_timers = (struct sun5_timer *) 0;
1104 return;
1105 }
1106
1107 	/* If the PROM is really using this, it must have mapped it itself. */
1108 err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
1109 if(err == -1) {
1110 prom_printf("PROM does not have timer mapped, trying to continue.\n");
1111 prom_timers = (struct sun5_timer *) 0;
1112 return;
1113 }
1114 prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
1115 }
1116
1117 static void kill_prom_timer(void)
1118 {
1119 if(!prom_timers)
1120 return;
1121
1122 /* Save them away for later. */
1123 prom_limit0 = prom_timers->limit0;
1124 prom_limit1 = prom_timers->limit1;
1125
1126 	/* Just as on sun4c/sun4m, the PROM uses a timer which ticks at IRQ 14.
1127 * We turn both off here just to be paranoid.
1128 */
1129 prom_timers->limit0 = 0;
1130 prom_timers->limit1 = 0;
1131
1132 /* Wheee, eat the interrupt packet too... */
1133 __asm__ __volatile__(
1134 " mov 0x40, %%g2\n"
1135 " ldxa [%%g0] %0, %%g1\n"
1136 " ldxa [%%g2] %1, %%g1\n"
1137 " stxa %%g0, [%%g0] %0\n"
1138 " membar #Sync\n"
1139 : /* no outputs */
1140 : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
1141 : "g1", "g2");
1142 }
1143
1144 void enable_prom_timer(void)
1145 {
1146 if(!prom_timers)
1147 return;
1148
1149 /* Set it to whatever was there before. */
1150 prom_timers->limit1 = prom_limit1;
1151 prom_timers->count1 = 0;
1152 prom_timers->limit0 = prom_limit0;
1153 prom_timers->count0 = 0;
1154 }
1155
1156 void __init init_IRQ(void)
1157 {
1158 static int called = 0;
1159
1160 if (called == 0) {
1161 called = 1;
1162 map_prom_timers();
1163 kill_prom_timer();
1164 memset(&ivector_table[0], 0, sizeof(ivector_table));
1165 #ifndef CONFIG_SMP
1166 memset(&__up_workvec[0], 0, sizeof(__up_workvec));
1167 #endif
1168 }
1169
1170 	/* We need to clear any IRQs pending in the soft interrupt
1171 	 * registers; a spurious one could be left around from the
1172 * PROM timer which we just disabled.
1173 */
1174 clear_softint(get_softint());
1175
1176 /* Now that ivector table is initialized, it is safe
1177 * to receive IRQ vector traps. We will normally take
1178 * one or two right now, in case some device PROM used
1179 * to boot us wants to speak to us. We just ignore them.
1180 */
1181 __asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
1182 "or %%g1, %0, %%g1\n\t"
1183 "wrpr %%g1, 0x0, %%pstate"
1184 : /* No outputs */
1185 : "i" (PSTATE_IE)
1186 : "g1");
1187 }
1188
1189 static struct proc_dir_entry * root_irq_dir;
1190 static struct proc_dir_entry * irq_dir [NUM_IVECS];
1191
1192 #ifdef CONFIG_SMP
1193
1194 #define HEX_DIGITS 16
1195
1196 static unsigned int parse_hex_value (const char *buffer,
1197 unsigned long count, unsigned long *ret)
1198 {
1199 unsigned char hexnum [HEX_DIGITS];
1200 unsigned long value;
1201 int i;
1202
1203 if (!count)
1204 return -EINVAL;
1205 if (count > HEX_DIGITS)
1206 count = HEX_DIGITS;
1207 if (copy_from_user(hexnum, buffer, count))
1208 return -EFAULT;
1209
1210 /*
1211 	 * Parse the first 16 characters as a hex string; any non-hex char
1212 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
1213 */
1214 value = 0;
1215
1216 for (i = 0; i < count; i++) {
1217 unsigned int c = hexnum[i];
1218
1219 switch (c) {
1220 case '0' ... '9': c -= '0'; break;
1221 case 'a' ... 'f': c -= 'a'-10; break;
1222 case 'A' ... 'F': c -= 'A'-10; break;
1223 default:
1224 goto out;
1225 }
1226 value = (value << 4) | c;
1227 }
1228 out:
1229 *ret = value;
1230 return 0;
1231 }
1232
1233 static unsigned long hw_to_logical(unsigned long mask)
1234 {
1235 unsigned long new_mask = 0UL;
1236 int i;
1237
1238 for (i = 0; i < NR_CPUS; i++) {
1239 if (mask & (1UL << i)) {
1240 int logical = cpu_number_map(i);
1241
1242 new_mask |= (1UL << logical);
1243 }
1244 }
1245
1246 return new_mask;
1247 }
1248
1249 static unsigned long logical_to_hw(unsigned long mask)
1250 {
1251 unsigned long new_mask = 0UL;
1252 int i;
1253
1254 for (i = 0; i < NR_CPUS; i++) {
1255 if (mask & (1UL << i)) {
1256 int hw = cpu_logical_map(i);
1257
1258 new_mask |= (1UL << hw);
1259 }
1260 }
1261
1262 return new_mask;
1263 }
1264
1265 static int irq_affinity_read_proc (char *page, char **start, off_t off,
1266 int count, int *eof, void *data)
1267 {
1268 struct ino_bucket *bp = ivector_table + (long)data;
1269 struct irqaction *ap = bp->irq_info;
1270 unsigned long mask = get_smpaff_in_irqaction(ap);
1271
1272 mask = logical_to_hw(mask);
1273
1274 if (count < HEX_DIGITS+1)
1275 return -EINVAL;
1276 return sprintf (page, "%016lx\n", mask == 0 ? ~0UL : mask);
1277 }
1278
1279 static inline void set_intr_affinity(int irq, unsigned long hw_aff)
1280 {
1281 struct ino_bucket *bp = ivector_table + irq;
1282 unsigned long aff = hw_to_logical(hw_aff);
1283
1284 /*
1285 * Users specify affinity in terms of cpu ids, which is what
1286 * is displayed via /proc/cpuinfo. As soon as we do this,
1287 * handler_irq() might see and take action.
1288 */
1289 put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, aff);
1290
1291 /* Migration is simply done by the next cpu to service this
1292 * interrupt.
1293 */
1294 }
1295
1296 static int irq_affinity_write_proc (struct file *file, const char *buffer,
1297 unsigned long count, void *data)
1298 {
1299 int irq = (long) data, full_count = count, err;
1300 unsigned long new_value;
1301
1302 	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;
1303
1304 /*
1305 	 * Do not allow disabling IRQs completely - it's too easy a
1306 	 * way to make the system unusable accidentally :-) At least
1307 * one online CPU still has to be targeted.
1308 */
1309 new_value &= cpu_online_map;
1310 if (!new_value)
1311 return -EINVAL;
1312
1313 set_intr_affinity(irq, new_value);
1314
1315 return full_count;
1316 }
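/* Usage sketch (illustrative): the proc file created by register_irq_proc()
 * below takes a hex mask of hardware cpu ids, so for example (assuming
 * hardware cpu ids 0 and 1 are online):
 *
 *	echo 3 > /proc/irq/<ino>/smp_affinity
 *
 * restricts delivery of that INO to those two cpus, and reading the file
 * back prints the current mask (or ~0 if no affinity has been set).
 */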
1317
1318 #endif
1319
1320 #define MAX_NAMELEN 10
1321
1322 static void register_irq_proc (unsigned int irq)
1323 {
1324 char name [MAX_NAMELEN];
1325
1326 if (!root_irq_dir || irq_dir[irq])
1327 return;
1328
1329 memset(name, 0, MAX_NAMELEN);
1330 sprintf(name, "%x", irq);
1331
1332 /* create /proc/irq/1234 */
1333 irq_dir[irq] = proc_mkdir(name, root_irq_dir);
1334
1335 #ifdef CONFIG_SMP
1336 /* XXX SMP affinity not supported on starfire yet. */
1337 if (this_is_starfire == 0) {
1338 struct proc_dir_entry *entry;
1339
1340 /* create /proc/irq/1234/smp_affinity */
1341 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
1342
1343 if (entry) {
1344 entry->nlink = 1;
1345 entry->data = (void *)(long)irq;
1346 entry->read_proc = irq_affinity_read_proc;
1347 entry->write_proc = irq_affinity_write_proc;
1348 }
1349 }
1350 #endif
1351 }
1352
1353 void init_irq_proc (void)
1354 {
1355 /* create /proc/irq */
1356 root_irq_dir = proc_mkdir("irq", 0);
1357 }
1358
1359