/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"
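
/*
 * Fixup level for misrouted interrupts, set by the "irqfixup" and
 * "irqpoll" boot options below:
 *  0 - disabled (default)
 *  1 - poll the other handlers when an interrupt goes unhandled
 *  2 - additionally poll on handled interrupts for IRQF_IRQPOLL
 *      lines (and irq 0), see try_misrouted_irq()
 */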
static int irqfixup __read_mostly;
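
/*
 * Polling machinery: a periodic timer re-polls spuriously disabled
 * lines, irq_poll_active ensures that only one poller runs at a
 * time, and irq_poll_cpu lets irq_wait_for_poll() detect a poll
 * running on the current CPU.
 */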
#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
static int irq_poll_cpu;
static atomic_t irq_poll_active;

/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck "disabled" and
 * "action" (the line may be about to be disabled). Only if it is
 * still active do we return true and let the handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
{
	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
		      "irq poll in progress on cpu %d for irq %d\n",
		      smp_processor_id(), desc->irq_data.irq))
		return false;

#ifdef CONFIG_SMP
	do {
		raw_spin_unlock(&desc->lock);
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();
		raw_spin_lock(&desc->lock);
	} while (irqd_irq_inprogress(&desc->irq_data));
	/* Might have been disabled in the meantime */
	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
	return false;
#endif
}
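
/*
 * A sketch of the expected caller pattern, assuming the flow handler
 * helper in kernel/irq/chip.c (irq_check_poll()): the flow handler
 * bails out to the poller when it finds IRQS_POLL_INPROGRESS set:
 *
 *	if (desc->istate & IRQS_POLL_INPROGRESS)
 *		return irq_wait_for_poll(desc);
 */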

/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(int irq, struct irq_desc *desc, bool force)
{
	irqreturn_t ret = IRQ_NONE;
	struct irqaction *action;

	raw_spin_lock(&desc->lock);

	/* PER_CPU and nested thread interrupts are never polled */
	if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
		goto out;

	/*
	 * Do not poll disabled interrupts unless the spurious
	 * disabled poller asks explicitly.
	 */
	if (irqd_irq_disabled(&desc->irq_data) && !force)
		goto out;

	/*
	 * All handlers must agree on IRQF_SHARED, so we test just the
	 * first.
	 */
	action = desc->action;
	if (!action || !(action->flags & IRQF_SHARED) ||
	    (action->flags & __IRQF_TIMER))
		goto out;

	/* Already running on another processor */
	if (irqd_irq_inprogress(&desc->irq_data)) {
		/*
		 * Already running: If it is shared get the other
		 * CPU to go looking for our mystery interrupt too
		 */
		desc->istate |= IRQS_PENDING;
		goto out;
	}

	/* Mark it poll in progress */
	desc->istate |= IRQS_POLL_INPROGRESS;
	do {
		if (handle_irq_event(desc) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
		/* Make sure that there is still a valid action */
		action = desc->action;
	} while ((desc->istate & IRQS_PENDING) && action);
	desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
	raw_spin_unlock(&desc->lock);
	return ret == IRQ_HANDLED;
}

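/*
 * Walk all interrupt lines except irq 0 and the one that just fired,
 * and poll their handlers. Returns 1 if some other handler claimed
 * an interrupt, so the caller can adjust its unhandled count.
 */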
static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;

	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		if (!i)
			continue;

		if (i == irq)	/* Already tried */
			continue;

		if (try_one_irq(i, desc, false))
			ok = 1;
	}
out:
	atomic_dec(&irq_poll_active);
	/* So the caller can adjust the irq error counts */
	return ok;
}

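/*
 * Timer callback: periodically poll the lines that the spurious
 * detector disabled (IRQS_SPURIOUS_DISABLED), so a device behind a
 * wedged line can still make progress. Rearms itself afterwards.
 */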
static void poll_spurious_irqs(unsigned long dummy)
{
	struct irq_desc *desc;
	int i;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;
	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		unsigned int state;

		if (!i)
			continue;

		/* Racy but it doesn't matter */
		state = desc->istate;
		barrier();
		if (!(state & IRQS_SPURIOUS_DISABLED))
			continue;

		local_irq_disable();
		try_one_irq(i, desc, true);
		local_irq_enable();
	}
out:
	atomic_dec(&irq_poll_active);
	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

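/*
 * Valid handler returns are combinations of IRQ_NONE (0),
 * IRQ_HANDLED (1) and IRQ_WAKE_THREAD (2); anything above
 * IRQ_HANDLED | IRQ_WAKE_THREAD (3) is bogus.
 */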
static inline int bad_action_ret(irqreturn_t action_ret)
{
	if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
		return 0;
	return 1;
}

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have come from a correctly
 *  functioning device sharing an IRQ with the failing one)
 */
static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
		 irqreturn_t action_ret)
{
	struct irqaction *action;
	unsigned long flags;

	if (bad_action_ret(action_ret)) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
				"the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	/*
	 * We need to take desc->lock here. note_interrupt() is called
	 * w/o desc->lock held, but with the IRQ marked in progress. We
	 * might race with something else removing an action. It's ok to
	 * take desc->lock here. See synchronize_irq().
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	action = desc->action;
	while (action) {
		printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
		if (action->thread_fn)
			printk(KERN_CONT " threaded [<%p>] %pf",
					action->thread_fn, action->thread_fn);
		printk(KERN_CONT "\n");
		action = action->next;
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

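/*
 * Rate limit the reports: only the first 100 bad IRQ events per boot
 * are dumped, so a stuck line cannot flood the log.
 */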
static void
report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count > 0) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}

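/*
 * Decide whether an interrupt warrants a scan for a misrouted
 * source. With "irqfixup" only unhandled interrupts are polled; with
 * "irqpoll" (irqfixup == 2) handled interrupts on IRQF_IRQPOLL lines
 * and on irq 0 are polled as well.
 */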
static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - see if it was misrouted? */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * But for 'irqfixup == 2' we also do it for handled interrupts if
	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
	 * traditional PC timer interrupt... legacy)
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't get the descriptor lock, "action" can
	 * change under us. We don't really care, but we don't
	 * want to follow a NULL pointer. So tell the compiler to
	 * just load it once by using a barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}

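/*
 * Bit 31 of threads_handled_last marks that spurious detection was
 * deferred to the next hardware interrupt because only a thread was
 * woken; see the IRQ_WAKE_THREAD handling in note_interrupt() below.
 */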
#define SPURIOUS_DEFERRED	0x80000000

void note_interrupt(unsigned int irq, struct irq_desc *desc,
		    irqreturn_t action_ret)
{
	if (desc->istate & IRQS_POLL_INPROGRESS)
		return;

	if (bad_action_ret(action_ret)) {
		report_bad_irq(irq, desc, action_ret);
		return;
	}

	/*
	 * We cannot call note_interrupt from the threaded handler
	 * because we need to look at the compound of all handlers
	 * (primary and threaded). Apart from that, in the threaded
	 * shared case we have no serialization against an incoming
	 * hardware interrupt while we are dealing with a threaded
	 * result.
	 *
	 * So in case a thread is woken, we just note the fact and
	 * defer the analysis to the next hardware interrupt.
	 *
	 * The threaded handlers store whether they successfully
	 * handled an interrupt and we check whether that number
	 * changed versus the last invocation.
	 *
	 * We could handle all interrupts with the delayed-by-one
	 * mechanism, but for the non-forced threaded case we'd just
	 * add pointless overhead to the straight hardirq interrupts
	 * for the sake of a few lines less code.
	 */
	if (action_ret & IRQ_WAKE_THREAD) {
		/*
		 * There is a thread woken. Check whether one of the
		 * shared primary handlers returned IRQ_HANDLED. If
		 * not we defer the spurious detection to the next
		 * interrupt.
		 */
		if (action_ret == IRQ_WAKE_THREAD) {
			int handled;
			/*
			 * We use bit 31 of threads_handled_last to
			 * denote that deferred spurious detection is
			 * active. No locking necessary as
			 * threads_handled_last is only accessed here
			 * and we have the guarantee that hard
			 * interrupts are not reentrant.
			 */
			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
				desc->threads_handled_last |= SPURIOUS_DEFERRED;
				return;
			}
			/*
			 * Check whether one of the threaded handlers
			 * returned IRQ_HANDLED since the last
			 * interrupt happened.
			 *
			 * For simplicity we just set bit 31, as it is
			 * set in threads_handled_last as well. So we
			 * avoid extra masking. And we really do not
			 * care about the high bits of the handled
			 * count. We just care about the count being
			 * different than the one we saw before.
			 */
			handled = atomic_read(&desc->threads_handled);
			handled |= SPURIOUS_DEFERRED;
			if (handled != desc->threads_handled_last) {
				action_ret = IRQ_HANDLED;
				/*
				 * Note: We keep the SPURIOUS_DEFERRED
				 * bit set. We are handling the
				 * previous invocation right now.
				 * Keep it for the current one, so the
				 * next hardware interrupt will
				 * account for it.
				 */
				desc->threads_handled_last = handled;
			} else {
				/*
				 * None of the threaded handlers felt
				 * responsible for the last interrupt.
				 *
				 * We keep the SPURIOUS_DEFERRED bit
				 * set in threads_handled_last as we
				 * need to account for the current
				 * interrupt as well.
				 */
				action_ret = IRQ_NONE;
			}
		} else {
			/*
			 * One of the primary handlers returned
			 * IRQ_HANDLED. So we don't care about the
			 * threaded handlers on the same line. Clear
			 * the deferred detection bit.
			 *
			 * In theory we could/should check whether the
			 * deferred bit is set and take the result of
			 * the previous run into account here as
			 * well. But it's really not worth the
			 * trouble. If every other interrupt is
			 * handled we never trigger the spurious
			 * detector. And if this is just the one out
			 * of 100k unhandled ones which is handled
			 * then we merely delay the spurious detection
			 * by one hard interrupt. Not a real problem.
			 */
			desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
		}
	}

	if (unlikely(action_ret == IRQ_NONE)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
		 * bus asynchronicity then don't eventually trigger an
		 * error; otherwise the counter becomes a doomsday timer
		 * for otherwise working systems.
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
	}

	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->istate |= IRQS_SPURIOUS_DISABLED;
		desc->depth++;
		irq_disable(desc);

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}

bool noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
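
/*
 * Usage note: booting with "noirqdebug" disables the spurious/lockup
 * detector entirely; since the module parameter is registered with
 * mode 0644, it should also be writable at runtime through sysfs.
 */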

static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");

	return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
				"enabled\n");
	printk(KERN_WARNING "This may significantly impact system "
				"performance\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);