1 /*
2  *  arch/ppc/kernel/irq.c
3  *
4  *  Derived from arch/i386/kernel/irq.c
5  *    Copyright (C) 1992 Linus Torvalds
6  *  Adapted from arch/i386 by Gary Thomas
7  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8  *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
9  *    Copyright (C) 1996-2001 Cort Dougan
10  *  Adapted for Power Macintosh by Paul Mackerras
11  *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
12  *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
13  *
14  * This file contains the code used by various IRQ handling routines:
15  * asking for different IRQ's should be done through these routines
16  * instead of just grabbing them. Thus setups with different IRQ numbers
17  * shouldn't result in any weird surprises, and installing new handlers
18  * should be easier.
19  *
20  * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
21  * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
22  * mask register (of which only 16 are defined), hence the weird shifting
23  * and complement of the cached_irq_mask.  I want to be able to stuff
24  * this right into the SIU SMASK register.
25  * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
26  * to reduce code size and avoid undefined function references.
27  */
28 
29 
30 #include <linux/ptrace.h>
31 #include <linux/errno.h>
32 #include <linux/threads.h>
33 #include <linux/kernel_stat.h>
34 #include <linux/signal.h>
35 #include <linux/sched.h>
36 #include <linux/ioport.h>
37 #include <linux/interrupt.h>
38 #include <linux/timex.h>
39 #include <linux/config.h>
40 #include <linux/init.h>
41 #include <linux/slab.h>
42 #include <linux/pci.h>
43 #include <linux/delay.h>
44 #include <linux/irq.h>
45 #include <linux/proc_fs.h>
46 #include <linux/random.h>
47 
48 #include <asm/uaccess.h>
49 #include <asm/bitops.h>
50 #include <asm/system.h>
51 #include <asm/io.h>
52 #include <asm/pgtable.h>
53 #include <asm/irq.h>
54 #include <asm/cache.h>
55 #include <asm/prom.h>
56 #include <asm/ptrace.h>
57 
58 #define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
59 
60 extern atomic_t ipi_recv;
61 extern atomic_t ipi_sent;
62 void enable_irq(unsigned int irq_nr);
63 void disable_irq(unsigned int irq_nr);
64 
65 static void register_irq_proc (unsigned int irq);
66 
67 #define MAXCOUNT 10000000
68 
69 irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
70 	{ [0 ... NR_IRQS-1] = { 0, NULL, NULL, 0, SPIN_LOCK_UNLOCKED}};
71 
72 int ppc_spurious_interrupts = 0;
73 struct irqaction *ppc_irq_action[NR_IRQS];
74 unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
75 unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
76 atomic_t ppc_n_lost_interrupts;
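
/*
 * Illustrative sketch only (not used by any code in this file): the two
 * arrays above hold one bit per IRQ, 32 IRQs per word, which is why
 * NR_MASK_WORDS rounds NR_IRQS up to a multiple of 32.  How a platform
 * maps an irq number onto a word/bit pair -- and whether the bit order
 * inside a word is reversed, as the SIU comment above suggests for 8xx --
 * is an assumption left to the PIC-specific code.
 */
#if 0	/* example only */
static inline void example_mark_lost(unsigned int irq)
{
	/* straightforward word/bit split; the bit ordering is an assumption */
	ppc_lost_interrupts[irq >> 5] |= 1UL << (irq & 31);
	atomic_inc(&ppc_n_lost_interrupts);
}
#endif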
77 
78 /* Nasty hack for shared irqs: request_irq() can be called very early in
79  * boot, before kmalloc() is usable, so we hand out entries from a small
80  * static cache instead.  This needs to be removed.
81  * -- Cort
82  */
83 #define IRQ_KMALLOC_ENTRIES 8
84 static int cache_bitmask = 0;
85 static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
86 extern int mem_init_done;
87 
88 #if defined(CONFIG_TAU_INT)
89 extern int tau_interrupts(unsigned long cpu);
90 extern int tau_initialized;
91 #endif
92 
93 void *irq_kmalloc(size_t size, int pri)
94 {
95 	unsigned int i;
96 	if ( mem_init_done )
97 		return kmalloc(size,pri);
98 	for ( i = 0; i < IRQ_KMALLOC_ENTRIES ; i++ )
99 		if ( ! ( cache_bitmask & (1<<i) ) )
100 		{
101 			cache_bitmask |= (1<<i);
102 			return (void *)(&malloc_cache[i]);
103 		}
104 	return NULL;
105 }
106 
107 void irq_kfree(void *ptr)
108 {
109 	unsigned int i;
110 	for ( i = 0 ; i < IRQ_KMALLOC_ENTRIES ; i++ )
111 		if ( ptr == &malloc_cache[i] )
112 		{
113 			cache_bitmask &= ~(1<<i);
114 			return;
115 		}
116 	kfree(ptr);
117 }
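
/*
 * Minimal usage sketch for the early-boot fallback above (illustrative
 * only): before mem_init_done is set, irq_kmalloc() hands out one of the
 * IRQ_KMALLOC_ENTRIES static slots and marks it in cache_bitmask;
 * irq_kfree() recognises such a pointer and just clears the bit instead
 * of calling kfree().
 */
#if 0	/* example only */
static void example_early_alloc(void)
{
	struct irqaction *a = irq_kmalloc(sizeof(*a), GFP_KERNEL);

	if (a)			/* may be a static cache slot early in boot */
		irq_kfree(a);	/* safe either way */
}
#endif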
118 
119 int
120 setup_irq(unsigned int irq, struct irqaction * new)
121 {
122 	int shared = 0;
123 	unsigned long flags;
124 	struct irqaction *old, **p;
125 	irq_desc_t *desc = irq_desc + irq;
126 
127 	/*
128 	 * Some drivers like serial.c use request_irq() heavily,
129 	 * so we have to be careful not to interfere with a
130 	 * running system.
131 	 */
132 	if (new->flags & SA_SAMPLE_RANDOM) {
133 		/*
134 		 * This function might sleep, so we want to call it first,
135 		 * outside of the atomic block.
136 		 * Yes, this might clear the entropy pool if the wrong
137 		 * driver is attempted to be loaded without actually
138 		 * installing a new handler, but is that really a problem?
139 		 * Only the sysadmin is able to do this.
140 		 */
141 		rand_initialize_irq(irq);
142 	}
143 
144 	/*
145 	 * The following block of code has to be executed atomically
146 	 */
147 	spin_lock_irqsave(&desc->lock,flags);
148 	p = &desc->action;
149 	if ((old = *p) != NULL) {
150 		/* Can't share interrupts unless both agree to */
151 		if (!(old->flags & new->flags & SA_SHIRQ)) {
152 			spin_unlock_irqrestore(&desc->lock,flags);
153 			return -EBUSY;
154 		}
155 
156 		/* add new interrupt at end of irq queue */
157 		do {
158 			p = &old->next;
159 			old = *p;
160 		} while (old);
161 		shared = 1;
162 	}
163 
164 	*p = new;
165 
166 	if (!shared) {
167 		desc->depth = 0;
168 		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
169 		unmask_irq(irq);
170 	}
171 	spin_unlock_irqrestore(&desc->lock,flags);
172 
173 	register_irq_proc(irq);
174 	return 0;
175 }
176 
177 void free_irq(unsigned int irq, void* dev_id)
178 {
179 	irq_desc_t *desc;
180 	struct irqaction **p;
181 	unsigned long flags;
182 
183 	desc = irq_desc + irq;
184 	spin_lock_irqsave(&desc->lock,flags);
185 	p = &desc->action;
186 	for (;;) {
187 		struct irqaction * action = *p;
188 		if (action) {
189 			struct irqaction **pp = p;
190 			p = &action->next;
191 			if (action->dev_id != dev_id)
192 				continue;
193 
194 			/* Found it - now remove it from the list of entries */
195 			*pp = action->next;
196 			if (!desc->action) {
197 				desc->status |= IRQ_DISABLED;
198 				mask_irq(irq);
199 			}
200 			spin_unlock_irqrestore(&desc->lock,flags);
201 
202 #ifdef CONFIG_SMP
203 			/* Wait to make sure it's not being used on another CPU */
204 			while (desc->status & IRQ_INPROGRESS)
205 				barrier();
206 #endif
207 			irq_kfree(action);
208 			return;
209 		}
210 		printk("Trying to free free IRQ%d\n",irq);
211 		spin_unlock_irqrestore(&desc->lock,flags);
212 		break;
213 	}
214 	return;
215 }
216 
217 int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
218 	unsigned long irqflags, const char * devname, void *dev_id)
219 {
220 	struct irqaction *action;
221 	int retval;
222 
223 	if (irq >= NR_IRQS)
224 		return -EINVAL;
225 	if (!handler)
226 	{
227 		/*
228 		 * free_irq() used to be implemented as a call to
229 		 * request_irq() with handler being NULL.  Now we have
230 		 * a real free_irq() but need to allow the old behavior
231 		 * for old code that hasn't caught up yet.
232 		 *  -- Cort <cort@fsmlabs.com>
233 		 */
234 		free_irq(irq, dev_id);
235 		return 0;
236 	}
237 
238 	action = (struct irqaction *)
239 		irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
240 	if (!action) {
241 		printk(KERN_ERR "irq_kmalloc() failed for irq %d !\n", irq);
242 		return -ENOMEM;
243 	}
244 
245 	action->handler = handler;
246 	action->flags = irqflags;
247 	action->mask = 0;
248 	action->name = devname;
249 	action->dev_id = dev_id;
250 	action->next = NULL;
251 
252 	retval = setup_irq(irq, action);
253 	if (retval)
254 	{
255 		kfree(action);
256 		return retval;
257 	}
258 
259 	return 0;
260 }
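
/*
 * Typical driver usage of the API above (sketch only; the device name,
 * the foo_* symbols and the register/IRQ details are made up for
 * illustration).  A shared handler must pass SA_SHIRQ and a unique
 * dev_id so free_irq() can find the right action on the chain.
 */
#if 0	/* example only */
struct foo_device {
	unsigned int irq;	/* hypothetical device descriptor */
};

static void foo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct foo_device *dev = dev_id;

	/* hypothetical device work goes here */
}

static int foo_attach(struct foo_device *dev)
{
	int err = request_irq(dev->irq, foo_interrupt,
			      SA_SHIRQ | SA_SAMPLE_RANDOM, "foo", dev);
	if (err)
		return err;

	/* ... and on detach: free_irq(dev->irq, dev); */
	return 0;
}
#endif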
261 
262 /*
263  * Generic enable/disable code: this just calls
264  * down into the PIC-specific version for the actual
265  * hardware disable after having gotten the irq
266  * controller lock.
267  */
268 
269 /**
270  *	disable_irq_nosync - disable an irq without waiting
271  *	@irq: Interrupt to disable
272  *
273  *	Disable the selected interrupt line. Disables nest: each disable
274  *	must be matched by an enable. Unlike disable_irq(), this function does
275  *	not wait for running instances of the IRQ handler to complete.
276  *
277  *	This function may be called from IRQ context.
278  */
279 
280 void disable_irq_nosync(unsigned int irq)
281 {
282 	irq_desc_t *desc = irq_desc + irq;
283 	unsigned long flags;
284 
285 	spin_lock_irqsave(&desc->lock, flags);
286 	if (!desc->depth++) {
287 		if (!(desc->status & IRQ_PER_CPU))
288 			desc->status |= IRQ_DISABLED;
289 		mask_irq(irq);
290 	}
291 	spin_unlock_irqrestore(&desc->lock, flags);
292 }
293 
294 /**
295  *	disable_irq - disable an irq and wait for completion
296  *	@irq: Interrupt to disable
297  *
298  *	Disable the selected interrupt line. Disables nest: for two
299  *	disables you need two enables. This
300  *	function waits for any pending IRQ handlers for this interrupt
301  *	to complete before returning. If you use this function while
302  *	holding a resource the IRQ handler may need you will deadlock.
303  *
304  *	This function may be called - with care - from IRQ context.
305  */
306 
307 void disable_irq(unsigned int irq)
308 {
309 	disable_irq_nosync(irq);
310 
311 	if (!local_irq_count(smp_processor_id())) {
312 		do {
313 			barrier();
314 		} while (irq_desc[irq].status & IRQ_INPROGRESS);
315 	}
316 }
317 
318 /**
319  *	enable_irq - enable interrupt handling on an irq
320  *	@irq: Interrupt to enable
321  *
322  *	Re-enables the processing of interrupts on this IRQ line
323  *	providing no disable_irq calls are now in effect.
324  *
325  *	This function may be called from IRQ context.
326  */
327 
328 void enable_irq(unsigned int irq)
329 {
330 	irq_desc_t *desc = irq_desc + irq;
331 	unsigned long flags;
332 
333 	spin_lock_irqsave(&desc->lock, flags);
334 	switch (desc->depth) {
335 	case 1: {
336 		unsigned int status = desc->status & ~IRQ_DISABLED;
337 		desc->status = status;
338 		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
339 			desc->status = status | IRQ_REPLAY;
340 			hw_resend_irq(desc->handler,irq);
341 		}
342 		unmask_irq(irq);
343 		/* fall-through */
344 	}
345 	default:
346 		desc->depth--;
347 		break;
348 	case 0:
349 		printk("enable_irq(%u) unbalanced\n", irq);
350 	}
351 	spin_unlock_irqrestore(&desc->lock, flags);
352 }
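
/*
 * The depth counter above makes disable/enable calls nest: the line is
 * only unmasked again when every disable_irq() has been balanced by an
 * enable_irq().  Sketch (the irq number is arbitrary):
 */
#if 0	/* example only */
static void example_nesting(unsigned int irq)
{
	disable_irq(irq);	/* depth 0 -> 1, line masked          */
	disable_irq(irq);	/* depth 1 -> 2, still masked         */
	enable_irq(irq);	/* depth 2 -> 1, still masked         */
	enable_irq(irq);	/* depth 1 -> 0, line unmasked again  */
}
#endif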
353 
354 int get_irq_list(char *buf)
355 {
356 	int i, len = 0, j;
357 	struct irqaction * action;
358 
359 	len += sprintf(buf+len, "           ");
360 	for (j=0; j<smp_num_cpus; j++)
361 		len += sprintf(buf+len, "CPU%d       ",j);
362 	*(char *)(buf+len++) = '\n';
363 
364 	for (i = 0 ; i < NR_IRQS ; i++) {
365 		action = irq_desc[i].action;
366 		if ( !action || !action->handler )
367 			continue;
368 		len += sprintf(buf+len, "%3d: ", i);
369 #ifdef CONFIG_SMP
370 		for (j = 0; j < smp_num_cpus; j++)
371 			len += sprintf(buf+len, "%10u ",
372 				kstat.irqs[cpu_logical_map(j)][i]);
373 #else
374 		len += sprintf(buf+len, "%10u ", kstat_irqs(i));
375 #endif /* CONFIG_SMP */
376 		if ( irq_desc[i].handler )
377 			len += sprintf(buf+len, " %s ", irq_desc[i].handler->typename );
378 		else
379 			len += sprintf(buf+len, "  None      ");
380 		len += sprintf(buf+len, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge  ");
381 		len += sprintf(buf+len, "    %s",action->name);
382 		for (action=action->next; action; action = action->next) {
383 			len += sprintf(buf+len, ", %s", action->name);
384 		}
385 		len += sprintf(buf+len, "\n");
386 	}
387 #ifdef CONFIG_TAU_INT
388 	if (tau_initialized){
389 		len += sprintf(buf+len, "TAU: ");
390 		for (j = 0; j < smp_num_cpus; j++)
391 			len += sprintf(buf+len, "%10u ",
392 					tau_interrupts(j));
393 		len += sprintf(buf+len, "  PowerPC             Thermal Assist (cpu temp)\n");
394 	}
395 #endif
396 #ifdef CONFIG_SMP
397 	/* should this be per processor send/receive? */
398 	len += sprintf(buf+len, "IPI (recv/sent): %10u/%u\n",
399 		       atomic_read(&ipi_recv), atomic_read(&ipi_sent));
400 #endif
401 	len += sprintf(buf+len, "BAD: %10u\n", ppc_spurious_interrupts);
402 	return len;
403 }
404 
405 static inline void
406 handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
407 {
408 	int status = 0;
409 
410 	if (!(action->flags & SA_INTERRUPT))
411 		__sti();
412 
413 	do {
414 		status |= action->flags;
415 		action->handler(irq, action->dev_id, regs);
416 		action = action->next;
417 	} while (action);
418 	if (status & SA_SAMPLE_RANDOM)
419 		add_interrupt_randomness(irq);
420 	__cli();
421 }
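
/*
 * Note that the loop above calls every handler registered on a shared
 * line, whether or not that particular device raised the interrupt.  A
 * well-behaved shared handler therefore checks its own status register
 * first.  Sketch only; the register layout and bar_* names are
 * hypothetical:
 */
#if 0	/* example only */
#define BAR_INT_STATUS	0x10	/* hypothetical register offset */
#define BAR_INT_PENDING	0x01	/* hypothetical status bit      */

struct bar_device {
	unsigned long regs;	/* hypothetical mapped register base */
};

static void bar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct bar_device *dev = dev_id;
	unsigned long status = readl(dev->regs + BAR_INT_STATUS);

	if (!(status & BAR_INT_PENDING))
		return;		/* not ours - another device on the line */

	writel(status, dev->regs + BAR_INT_STATUS);	/* ack the device */
	/* handle the event ... */
}
#endif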
422 
423 /*
424  * Eventually, this should take an array of interrupts and an array size
425  * so it can dispatch multiple interrupts.
426  */
427 void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
428 {
429 	int status;
430 	struct irqaction *action;
431 	int cpu = smp_processor_id();
432 	irq_desc_t *desc = &irq_desc[irq];
433 
434 	kstat.irqs[cpu][irq]++;
435 	spin_lock(&desc->lock);
436 	ack_irq(irq);
437 	/*
438 	   REPLAY is when Linux resends an IRQ that was dropped earlier
439 	   WAITING is used by probe to mark irqs that are being tested
440 	   */
441 	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
442 	if (!(status & IRQ_PER_CPU))
443 		status |= IRQ_PENDING; /* we _want_ to handle it */
444 
445 	/*
446 	 * If the IRQ is disabled for whatever reason, we cannot
447 	 * use the action we have.
448 	 */
449 	action = NULL;
450 	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
451 		action = desc->action;
452 		if (!action || !action->handler) {
453 			ppc_spurious_interrupts++;
454 			printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
455 			/* We can't call disable_irq here, it would deadlock */
456 			++desc->depth;
457 			desc->status |= IRQ_DISABLED;
458 			mask_irq(irq);
459 			/* This is a real interrupt, we have to eoi it,
460 			   so we jump to out */
461 			goto out;
462 		}
463 		status &= ~IRQ_PENDING; /* we commit to handling */
464 		if (!(status & IRQ_PER_CPU))
465 			status |= IRQ_INPROGRESS; /* we are handling it */
466 	}
467 	desc->status = status;
468 
469 	/*
470 	 * If there is no IRQ handler or it was disabled, exit early.
471 	   Since we set PENDING, if another processor is handling
472 	   a different instance of this same irq, the other processor
473 	   will take care of it.
474 	 */
475 	if (!action)
476 		goto out;
477 
478 
479 	/*
480 	 * Edge triggered interrupts need to remember
481 	 * pending events.
482 	 * This applies to any hw interrupts that allow a second
483 	 * instance of the same irq to arrive while we are in do_IRQ
484 	 * or in the handler. But the code here only handles the _second_
485 	 * instance of the irq, not the third or fourth. So it is mostly
486 	 * useful for irq hardware that does not mask cleanly in an
487 	 * SMP environment.
488 	 */
489 	for (;;) {
490 		spin_unlock(&desc->lock);
491 		handle_irq_event(irq, regs, action);
492 		spin_lock(&desc->lock);
493 
494 		if (!(desc->status & IRQ_PENDING))
495 			break;
496 		desc->status &= ~IRQ_PENDING;
497 	}
498 	desc->status &= ~IRQ_INPROGRESS;
499 out:
500 	/*
501 	 * The ->end() handler has to deal with interrupts which got
502 	 * disabled while the handler was running.
503 	 */
504 	if (desc->handler) {
505 		if (desc->handler->end)
506 			desc->handler->end(irq);
507 		else if (desc->handler->enable)
508 			desc->handler->enable(irq);
509 	}
510 
511 #ifdef CONFIG_DBOX2
512 	/*
513 	 * Interrupts marked as oneshot are level
514 	 * triggered. We disable them here for onboard
515 	 * hardware which can not be configured to
516 	 * generate edge triggered interrupts due to
517 	 * lack of documentation.
518 	 */
519 	if ((action) && (action->flags & SA_ONESHOT))
520 		disable_irq_nosync(irq);
521 #endif
522 
523 	spin_unlock(&desc->lock);
524 }
525 
526 int do_IRQ(struct pt_regs *regs)
527 {
528 	int cpu = smp_processor_id();
529 	int irq, first = 1;
530 	hardirq_enter(cpu);
531 
532 	/*
533 	 * Every platform is required to implement ppc_md.get_irq.
534 	 * This function will either return an irq number or -1 to
535 	 * indicate there are no more pending.  But the first time
536 	 * through the loop this means there wasn't an IRQ pending.
537 	 * The value -2 is for buggy hardware and means that this IRQ
538 	 * has already been handled. -- Tom
539 	 */
540 	while ((irq = ppc_md.get_irq(regs)) >= 0) {
541 		ppc_irq_dispatch_handler(regs, irq);
542 		first = 0;
543 	}
544 	if (irq != -2 && first)
545 		/* That's not SMP safe ... but who cares ? */
546 		ppc_spurious_interrupts++;
547 	hardirq_exit(cpu);
548 
549 	if (softirq_pending(cpu))
550 		do_softirq();
551 	return 1; /* lets ret_from_int know we can do checks */
552 }
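
/*
 * Sketch of the contract a platform's ppc_md.get_irq() has to follow
 * (see the comment in do_IRQ above): return the pending irq number,
 * -1 if nothing is pending, or -2 if the interrupt has already been
 * taken care of (buggy hardware).  The pic_* helpers below are
 * hypothetical placeholders for the platform's PIC access code.
 */
#if 0	/* example only */
static int example_get_irq(struct pt_regs *regs)
{
	int hw = pic_read_pending();	/* hypothetical: < 0 if none pending */

	if (hw < 0)
		return -1;		/* nothing to do                     */
	if (pic_was_spurious(hw))	/* hypothetical check                */
		return -2;		/* already handled, don't count it   */
	return hw;
}
#endif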
553 
554 unsigned long probe_irq_on (void)
555 {
556 	return 0;
557 }
558 
559 int probe_irq_off (unsigned long irqs)
560 {
561 	return 0;
562 }
563 
564 unsigned int probe_irq_mask(unsigned long irqs)
565 {
566 	return 0;
567 }
568 
569 void __init init_IRQ(void)
570 {
571 	static int once = 0;
572 
573 	if ( once )
574 		return;
575 	else
576 		once++;
577 
578 	ppc_md.init_IRQ();
579 }
580 
581 #ifdef CONFIG_SMP
582 unsigned char global_irq_holder = NO_PROC_ID;
583 unsigned volatile long global_irq_lock; /* pedantic: long for set_bit -- RR */
584 atomic_t global_irq_count;
585 
586 atomic_t global_bh_count;
587 
588 static void show(char * str)
589 {
590 	int i;
591 	unsigned long *stack;
592 	int cpu = smp_processor_id();
593 
594 	printk("\n%s, CPU %d:\n", str, cpu);
595 	printk("irq:  %d [%d %d]\n",
596 	       atomic_read(&global_irq_count),
597 	       local_irq_count(0),
598 	       local_irq_count(1));
599 	printk("bh:   %d [%d %d]\n",
600 	       atomic_read(&global_bh_count),
601 	       local_bh_count(0),
602 	       local_bh_count(1));
603 	stack = (unsigned long *) &str;
604 	for (i = 40; i ; i--) {
605 		unsigned long x = *++stack;
606 		if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) {
607 			printk("<[%08lx]> ", x);
608 		}
609 	}
610 }
611 
612 static inline void wait_on_bh(void)
613 {
614 	int count = MAXCOUNT;
615 	do {
616 		if (!--count) {
617 			show("wait_on_bh");
618 			count = ~0;
619 		}
620 		/* nothing .. wait for the other bh's to go away */
621 	} while (atomic_read(&global_bh_count) != 0);
622 }
623 
624 
625 static inline void wait_on_irq(int cpu)
626 {
627 	int count = MAXCOUNT;
628 
629 	for (;;) {
630 
631 		/*
632 		 * Wait until all interrupts are gone. Wait
633 		 * for bottom half handlers unless we're
634 		 * already executing in one..
635 		 */
636 		if (!atomic_read(&global_irq_count)) {
637 			if (local_bh_count(cpu)
638 			    || !atomic_read(&global_bh_count))
639 				break;
640 		}
641 
642 		/* Duh, we have to loop. Release the lock to avoid deadlocks */
643 		clear_bit(0,&global_irq_lock);
644 
645 		for (;;) {
646 			if (!--count) {
647 				show("wait_on_irq");
648 				count = ~0;
649 			}
650 			__sti();
651 			/* don't worry about the lock race Linus found
652 			 * on intel here. -- Cort
653 			 */
654 			__cli();
655 			if (atomic_read(&global_irq_count))
656 				continue;
657 			if (global_irq_lock)
658 				continue;
659 			if (!local_bh_count(cpu)
660 			    && atomic_read(&global_bh_count))
661 				continue;
662 			if (!test_and_set_bit(0,&global_irq_lock))
663 				break;
664 		}
665 	}
666 }
667 
668 /*
669  * This is called when we want to synchronize with
670  * bottom half handlers. We need to wait until
671  * no other CPU is executing any bottom half handler.
672  *
673  * Don't wait if we're already running in an interrupt
674  * context or are inside a bh handler.
675  */
676 void synchronize_bh(void)
677 {
678 	if (atomic_read(&global_bh_count) && !in_interrupt())
679 		wait_on_bh();
680 }
681 
682 /*
683  * This is called when we want to synchronize with
684  * interrupts. We may for example tell a device to
685  * stop sending interrupts: but to make sure there
686  * are no interrupts that are executing on another
687  * CPU we need to call this function.
688  */
689 void synchronize_irq(void)
690 {
691 	if (atomic_read(&global_irq_count)) {
692 		/* Stupid approach */
693 		cli();
694 		sti();
695 	}
696 }
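
/*
 * Typical use of synchronize_irq() (sketch only; the baz_* device and
 * its operations are hypothetical): first stop the device from raising
 * new interrupts, then wait for any handler still running on another
 * CPU before tearing down the data it uses.
 */
#if 0	/* example only */
struct baz_device {
	unsigned int irq;	/* hypothetical */
	void *buffer;		/* hypothetical */
};

extern void baz_quiesce_hw(struct baz_device *dev);	/* hypothetical */

static void example_shutdown(struct baz_device *dev)
{
	baz_quiesce_hw(dev);	/* device raises no further interrupts */
	synchronize_irq();	/* wait for in-flight handlers          */
	free_irq(dev->irq, dev);
	kfree(dev->buffer);	/* now nobody can touch it              */
}
#endif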
697 
698 static inline void get_irqlock(int cpu)
699 {
700 	unsigned int loops = MAXCOUNT;
701 
702 	if (test_and_set_bit(0,&global_irq_lock)) {
703 		/* do we already hold the lock? */
704 		if ((unsigned char) cpu == global_irq_holder)
705 			return;
706 		/* Uhhuh.. Somebody else got it. Wait.. */
707 		do {
708 			do {
709 				if (loops-- == 0) {
710 					printk("get_irqlock(%d) waiting, global_irq_holder=%d\n", cpu, global_irq_holder);
711 #ifdef CONFIG_XMON
712 					xmon(0);
713 #endif
714 				}
715 			} while (test_bit(0,&global_irq_lock));
716 		} while (test_and_set_bit(0,&global_irq_lock));
717 	}
718 	/*
719 	 * We also need to make sure that nobody else is running
720 	 * in an interrupt context.
721 	 */
722 	wait_on_irq(cpu);
723 
724 	/*
725 	 * Ok, finally..
726 	 */
727 	global_irq_holder = cpu;
728 }
729 
730 /*
731  * A global "cli()" while in an interrupt context
732  * turns into just a local cli(). Interrupts
733  * should use spinlocks for the (very unlikely)
734  * case that they ever want to protect against
735  * each other.
736  *
737  * If we already have local interrupts disabled,
738  * this will not turn a local disable into a
739  * global one (problems with spinlocks: this makes
740  * save_flags+cli+sti usable inside a spinlock).
741  */
742 void __global_cli(void)
743 {
744 	unsigned long flags;
745 
746 	__save_flags(flags);
747 	if (flags & (1 << 15)) {
748 		int cpu = smp_processor_id();
749 		__cli();
750 		if (!local_irq_count(cpu))
751 			get_irqlock(cpu);
752 	}
753 }
754 
755 void __global_sti(void)
756 {
757 	int cpu = smp_processor_id();
758 
759 	if (!local_irq_count(cpu))
760 		release_irqlock(cpu);
761 	__sti();
762 }
763 
764 /*
765  * SMP flags value to restore to:
766  * 0 - global cli
767  * 1 - global sti
768  * 2 - local cli
769  * 3 - local sti
770  */
771 unsigned long __global_save_flags(void)
772 {
773 	int retval;
774 	int local_enabled;
775 	unsigned long flags;
776 
777 	__save_flags(flags);
778 	local_enabled = (flags >> 15) & 1;
779 	/* default to local */
780 	retval = 2 + local_enabled;
781 
782 	/* check for global flags if we're not in an interrupt */
783 	if (!local_irq_count(smp_processor_id())) {
784 		if (local_enabled)
785 			retval = 1;
786 		if (global_irq_holder == (unsigned char) smp_processor_id())
787 			retval = 0;
788 	}
789 	return retval;
790 }
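
/*
 * Sketch of how the 0-3 encoding above is meant to be used: a caller
 * saves the current global/local interrupt state, disables interrupts
 * globally, and later restores exactly the state it found.  (On SMP
 * builds the generic save_flags()/cli()/restore_flags() macros are
 * normally mapped onto these __global_* routines.)
 */
#if 0	/* example only */
static void example_global_irq_save(void)
{
	unsigned long flags;

	flags = __global_save_flags();	/* 0..3, see table above   */
	__global_cli();			/* global disable          */
	/* ... short critical section ... */
	__global_restore_flags(flags);	/* back to the saved state */
}
#endif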
791 
792 int
793 tb(long vals[],
794    int  max_size)
795 {
796    register unsigned long *orig_sp __asm__ ("r1");
797    register unsigned long lr __asm__ ("r3");
798    unsigned long *sp;
799    int i;
800 
801    asm volatile ("mflr 3");
802    vals[0] = lr;
803    sp = (unsigned long *) *orig_sp;
804    sp = (unsigned long *) *sp;
805    for (i=1; i<max_size; i++) {
806       if (sp == 0) {
807          break;
808       }
809 
810       vals[i] = *(sp+1);
811       sp = (unsigned long *) *sp;
812    }
813 
814    return i;
815 }
816 
817 void __global_restore_flags(unsigned long flags)
818 {
819 	switch (flags) {
820 	case 0:
821 		__global_cli();
822 		break;
823 	case 1:
824 		__global_sti();
825 		break;
826 	case 2:
827 		__cli();
828 		break;
829 	case 3:
830 		__sti();
831 		break;
832 	default:
833 	{
834 		unsigned long trace[5];
835                 int           count;
836                 int           i;
837 
838 		printk("global_restore_flags: %08lx (%08lx)\n",
839 			flags, (&flags)[-1]);
840                 count = tb(trace, 5);
841                 printk("tb:");
842                 for(i=0; i<count; i++) {
843 			printk(" %8.8lx", trace[i]);
844 		}
845 		printk("\n");
846 	}
847 	}
848 }
849 #endif /* CONFIG_SMP */
850 
851 static struct proc_dir_entry *root_irq_dir;
852 static struct proc_dir_entry *irq_dir[NR_IRQS];
853 static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
854 
855 #ifdef CONFIG_IRQ_ALL_CPUS
856 #define DEFAULT_CPU_AFFINITY 0xffffffff
857 #else
858 #define DEFAULT_CPU_AFFINITY 0x00000001
859 #endif
860 
861 unsigned int irq_affinity [NR_IRQS] =
862 	{ [0 ... NR_IRQS-1] = DEFAULT_CPU_AFFINITY };
863 
864 #define HEX_DIGITS 8
865 
866 static int irq_affinity_read_proc (char *page, char **start, off_t off,
867 			int count, int *eof, void *data)
868 {
869 	if (count < HEX_DIGITS+1)
870 		return -EINVAL;
871 	return sprintf (page, "%08x\n", irq_affinity[(int)data]);
872 }
873 
874 static unsigned int parse_hex_value (const char *buffer,
875 		unsigned long count, unsigned long *ret)
876 {
877 	unsigned char hexnum [HEX_DIGITS];
878 	unsigned long value;
879 	int i;
880 
881 	if (!count)
882 		return -EINVAL;
883 	if (count > HEX_DIGITS)
884 		count = HEX_DIGITS;
885 	if (copy_from_user(hexnum, buffer, count))
886 		return -EFAULT;
887 
888 	/*
889 	 * Parse the first 8 characters as a hex string, any non-hex char
890 	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
891 	 */
892 	value = 0;
893 
894 	for (i = 0; i < count; i++) {
895 		unsigned int c = hexnum[i];
896 
897 		switch (c) {
898 			case '0' ... '9': c -= '0'; break;
899 			case 'a' ... 'f': c -= 'a'-10; break;
900 			case 'A' ... 'F': c -= 'A'-10; break;
901 		default:
902 			goto out;
903 		}
904 		value = (value << 4) | c;
905 	}
906 out:
907 	*ret = value;
908 	return 0;
909 }
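
/*
 * A few concrete inputs for the parser above (these follow directly from
 * the code: at most HEX_DIGITS characters are examined, parsing is
 * case-insensitive and stops at the first non-hex character):
 *
 *	"00e1\n"    -> 0x000000e1
 *	"E1"        -> 0x000000e1
 *	"deadbeef"  -> 0xdeadbeef
 *	"12junk"    -> 0x00000012   (stops at 'j')
 *	""          -> -EINVAL      (count == 0)
 */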
910 
911 static int irq_affinity_write_proc (struct file *file, const char *buffer,
912 					unsigned long count, void *data)
913 {
914 	int irq = (int) data, full_count = count, err;
915 	unsigned long new_value;
916 
917 	if (!irq_desc[irq].handler->set_affinity)
918 		return -EIO;
919 
920 	err = parse_hex_value(buffer, count, &new_value);
921 
922 	/*
923 	 * Do not allow disabling IRQs completely - it's a too easy
924 	 * way to make the system unusable accidentally :-) At least
925 	 * one online CPU still has to be targeted.
926 	 *
927 	 * We assume a 1-1 logical<->physical cpu mapping here.  If
928 	 * we assume that the cpu indices in /proc/irq/../smp_affinity
929 	 * are actually logical cpu #'s then we have no problem.
930 	 *  -- Cort <cort@fsmlabs.com>
931 	 */
932 	if (!(new_value & cpu_online_map))
933 		return -EINVAL;
934 
935 	irq_affinity[irq] = new_value;
936 	irq_desc[irq].handler->set_affinity(irq, new_value);
937 
938 	return full_count;
939 }
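
/*
 * Userspace view of the handler above (illustrative; the IRQ number is
 * made up): the affinity mask is written as up to eight hex digits, and
 * at least one bit corresponding to an online CPU must be set or the
 * write fails with EINVAL.  For example, from a shell:
 *
 *	# route IRQ 21 to CPU 1 only
 *	echo 00000002 > /proc/irq/21/smp_affinity
 *	cat /proc/irq/21/smp_affinity
 *	00000002
 */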
940 
941 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
942 			int count, int *eof, void *data)
943 {
944 	unsigned long *mask = (unsigned long *) data;
945 	if (count < HEX_DIGITS+1)
946 		return -EINVAL;
947 	return sprintf (page, "%08lx\n", *mask);
948 }
949 
950 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
951 					unsigned long count, void *data)
952 {
953 	unsigned long *mask = (unsigned long *) data, full_count = count, err;
954 	unsigned long new_value;
955 
956 	err = parse_hex_value(buffer, count, &new_value);
957 	if (err)
958 		return err;
959 
960 	*mask = new_value;
961 	return full_count;
962 }
963 
964 #define MAX_NAMELEN 10
965 
966 static void register_irq_proc (unsigned int irq)
967 {
968 	struct proc_dir_entry *entry;
969 	char name [MAX_NAMELEN];
970 
971 	if (!root_irq_dir || (irq_desc[irq].handler == NULL) || irq_dir[irq])
972 		return;
973 
974 	memset(name, 0, MAX_NAMELEN);
975 	sprintf(name, "%d", irq);
976 
977 	/* create /proc/irq/1234 */
978 	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
979 
980 	/* create /proc/irq/1234/smp_affinity */
981 	entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
982 
983 	entry->nlink = 1;
984 	entry->data = (void *)irq;
985 	entry->read_proc = irq_affinity_read_proc;
986 	entry->write_proc = irq_affinity_write_proc;
987 
988 	smp_affinity_entry[irq] = entry;
989 }
990 
991 unsigned long prof_cpu_mask = -1;
992 
993 void init_irq_proc (void)
994 {
995 	struct proc_dir_entry *entry;
996 	int i;
997 
998 	/* create /proc/irq */
999 	root_irq_dir = proc_mkdir("irq", 0);
1000 
1001 	/* create /proc/irq/prof_cpu_mask */
1002 	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
1003 
1004 	entry->nlink = 1;
1005 	entry->data = (void *)&prof_cpu_mask;
1006 	entry->read_proc = prof_cpu_mask_read_proc;
1007 	entry->write_proc = prof_cpu_mask_write_proc;
1008 
1009 	/*
1010 	 * Create entries for all existing IRQs.
1011 	 */
1012 	for (i = 0; i < NR_IRQS; i++) {
1013 		if (irq_desc[i].handler == NULL)
1014 			continue;
1015 		register_irq_proc(i);
1016 	}
1017 }
1018 
1019 void no_action(int irq, void *dev, struct pt_regs *regs)
1020 {
1021 }
1022