// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on
 * CONFIG_PPC_8xx to reduce code space and undefined function references.
 */
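/*
 * Illustrative sketch of the 8xx masking scheme described above (for
 * exposition only, not code from this file; the register and field
 * names are assumptions): with IRQ0 in the most-significant bit and a
 * set bit meaning "enabled", enabling IRQ n amounts to:
 *
 *      cached_irq_mask |= 1u << (31 - irq);
 *      out_be32(&siu->sc_simask, cached_irq_mask);
 */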

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/hw_irq.h>
#include <asm/softirq_stack.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/dbell.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

#ifdef CONFIG_PPC32
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

static inline notrace unsigned long get_irq_happened(void)
{
        unsigned long happened;

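        /*
         * On ppc64, r13 permanently holds the pointer to this CPU's
         * paca_struct, so a single lbz via r13 reads
         * local_paca->irq_happened directly.
         */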
        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

        return happened;
}

void replay_soft_interrupts(void)
{
        struct pt_regs regs;

        /*
         * Be careful here, calling these interrupt handlers can cause
         * softirqs to be raised, which may then run when irq_exit is
         * called, which will cause local_irq_enable() to be run, which
         * can then recurse into this function. Don't keep any state
         * across interrupt handler calls which may change underneath us.
         *
         * We use local_paca rather than get_paca() to avoid all the
         * debug_smp_processor_id() business in this low level function.
         */

        ppc_save_regs(&regs);
        regs.softe = IRQS_ENABLED;
        regs.msr |= MSR_EE;
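
        /*
         * The regs synthesized above look like a frame for an interrupt
         * taken with interrupts fully enabled (softe and MSR[EE] set);
         * only regs.trap is adjusted per interrupt type below.
         */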

again:
        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                WARN_ON_ONCE(mfmsr() & MSR_EE);

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp, tmp2;
                lv1_get_version_info(&tmp, &tmp2);
        }

        /*
         * Check if a hypervisor Maintenance interrupt happened.
         * This is a higher priority interrupt than the others, so
         * replay it first.
         */
        if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
                local_paca->irq_happened &= ~PACA_IRQ_HMI;
                regs.trap = INTERRUPT_HMI;
                handle_hmi_exception(&regs);
                if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
                        hard_irq_disable();
        }

        if (local_paca->irq_happened & PACA_IRQ_DEC) {
                local_paca->irq_happened &= ~PACA_IRQ_DEC;
                regs.trap = INTERRUPT_DECREMENTER;
                timer_interrupt(&regs);
                if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
                        hard_irq_disable();
        }

        if (local_paca->irq_happened & PACA_IRQ_EE) {
                local_paca->irq_happened &= ~PACA_IRQ_EE;
                regs.trap = INTERRUPT_EXTERNAL;
                do_IRQ(&regs);
                if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
                        hard_irq_disable();
        }

        if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
                local_paca->irq_happened &= ~PACA_IRQ_DBELL;
                regs.trap = INTERRUPT_DOORBELL;
                doorbell_exception(&regs);
                if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
                        hard_irq_disable();
        }

        /* Book3E does not support soft-masking PMI interrupts */
        if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
                local_paca->irq_happened &= ~PACA_IRQ_PMI;
                regs.trap = INTERRUPT_PERFMON;
                performance_monitor_exception(&regs);
                if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
                        hard_irq_disable();
        }

        if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
                /*
                 * We are responding to the next interrupt, so interrupt-off
                 * latencies should be reset here.
                 */
                trace_hardirqs_on();
                trace_hardirqs_off();
                goto again;
        }
}

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
static inline void replay_soft_interrupts_irqrestore(void)
{
        unsigned long kuap_state = get_kuap();

        /*
         * Check if anything calls local_irq_enable/restore() when KUAP is
         * disabled (user access enabled). We handle that case here by saving
         * and re-locking AMR but we shouldn't get here in the first place,
         * hence the warning.
         */
        kuap_assert_locked();

        if (kuap_state != AMR_KUAP_BLOCKED)
                set_kuap(AMR_KUAP_BLOCKED);

        replay_soft_interrupts();

        if (kuap_state != AMR_KUAP_BLOCKED)
                set_kuap(kuap_state);
}
#else
#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
#endif

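/*
 * A sketch of the lazy-masking flow that ends up here (illustrative,
 * not a new API): local_irq_disable() only sets the soft mask in the
 * PACA and leaves MSR[EE] on. A hard interrupt arriving while
 * soft-masked is recorded in paca->irq_happened and returns with EE
 * cleared, so a typical critical section behaves as:
 *
 *      local_irq_save(flags);          // soft mask only, EE may stay set
 *      ...                             // interrupt sets PACA_IRQ_*, clears EE
 *      local_irq_restore(flags);       // lands here and replays the above
 */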
notrace void arch_local_irq_restore(unsigned long mask)
{
        unsigned char irq_happened;

        /* Write the new soft-enabled value if it is a disable */
        if (mask) {
                irq_soft_mask_set(mask);
                return;
        }

        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                WARN_ON_ONCE(in_nmi() || in_hardirq());

        /*
         * After the stb, interrupts are unmasked and there are no interrupts
         * pending replay. The restart sequence makes this atomic with
         * respect to soft-masked interrupts. If this was just a simple code
         * sequence, a soft-masked interrupt could become pending right after
         * the comparison and before the stb.
         *
         * This allows interrupts to be unmasked without hard disabling, and
         * also without new hard interrupts coming in ahead of pending ones.
         */
        asm_volatile_goto(
        "1:                                     \n"
        "       lbz     9,%0(13)                \n"
        "       cmpwi   9,0                     \n"
        "       bne     %l[happened]            \n"
        "       stb     9,%1(13)                \n"
        "2:                                     \n"
                RESTART_TABLE(1b, 2b, 1b)
        : : "i" (offsetof(struct paca_struct, irq_happened)),
            "i" (offsetof(struct paca_struct, irq_soft_mask))
        : "cr0", "r9"
        : happened);

        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                WARN_ON_ONCE(!(mfmsr() & MSR_EE));

        return;

happened:
        irq_happened = get_irq_happened();
        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                WARN_ON_ONCE(!irq_happened);

        if (irq_happened == PACA_IRQ_HARD_DIS) {
                if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                        WARN_ON_ONCE(mfmsr() & MSR_EE);
                irq_soft_mask_set(IRQS_ENABLED);
                local_paca->irq_happened = 0;
                __hard_irq_enable();
                return;
        }

        /* Have interrupts to replay, need to hard disable first */
        if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
                if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
                        if (!(mfmsr() & MSR_EE)) {
                                /*
                                 * An interrupt could have come in and cleared
                                 * MSR[EE] and set IRQ_HARD_DIS, so check
                                 * IRQ_HARD_DIS again and warn if it is still
                                 * clear.
                                 */
                                irq_happened = get_irq_happened();
                                WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
                        }
                }
                __hard_irq_disable();
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
        } else {
                if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
                        if (WARN_ON_ONCE(mfmsr() & MSR_EE))
                                __hard_irq_disable();
                }
        }

        /*
         * Disable preempt here, so that the preempt_enable below will
         * reschedule if required (a replayed interrupt may set
         * need_resched).
         */
        preempt_disable();
        irq_soft_mask_set(IRQS_ALL_DISABLED);
        trace_hardirqs_off();

        replay_soft_interrupts_irqrestore();
        local_paca->irq_happened = 0;

        trace_hardirqs_on();
        irq_soft_mask_set(IRQS_ENABLED);
        __hard_irq_enable();
        preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * This is a helper to use when about to enter an idle low-power state,
 * when entering that state has the side effect of re-enabling
 * interrupts (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        /*
         * Mark interrupts as soft-enabled and clear the
         * PACA_IRQ_HARD_DIS from the pending mask since we
         * are about to hard enable as well as a side effect
         * of entering the low power state.
         */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
        irq_soft_mask_set(IRQS_ENABLED);

        /* Tell the caller to enter the low power state */
        return true;
}
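
/*
 * Typical caller shape (a sketch only; the cede-based pseries idle
 * loop is the kind of user described above):
 *
 *      if (prep_irq_for_idle())
 *              ppc_md.power_save();    // may hard-enable, e.g. via H_CEDE
 */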

#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not record
 * long irqs-off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
        WARN_ON(!irqs_disabled());

        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        return true;
}

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET        0xff

static const u8 srr1_to_lazyirq[0x10] = {
        0, 0, 0,
        PACA_IRQ_DBELL,
        IRQ_SYSTEM_RESET,
        PACA_IRQ_DBELL,
        PACA_IRQ_DEC,
        0,
        PACA_IRQ_EE,
        PACA_IRQ_EE,
        PACA_IRQ_HMI,
        0, 0, 0, 0, 0 };
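
/*
 * The table index is the SRR1 wake reason field (SRR1[42:45]),
 * extracted in irq_set_pending_from_srr1() below as
 * (srr1 & SRR1_WAKEMASK_P8) >> 18.
 */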

void replay_system_reset(void)
{
        struct pt_regs regs;

        ppc_save_regs(&regs);
        regs.trap = 0x100;
        get_paca()->in_nmi = 1;
        system_reset_exception(&regs);
        get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);

void irq_set_pending_from_srr1(unsigned long srr1)
{
        unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
        u8 reason = srr1_to_lazyirq[idx];

        /*
         * Take the system reset now, which is immediately after registers
         * are restored from idle. It's an NMI, so interrupts need not be
         * re-enabled before it is taken.
         */
        if (unlikely(reason == IRQ_SYSTEM_RESET)) {
                replay_system_reset();
                return;
        }

        if (reason == PACA_IRQ_DBELL) {
                /*
                 * When a doorbell triggers a system reset wakeup, the message
                 * is not cleared, so if the doorbell interrupt is replayed
                 * and the IPI handled, the doorbell interrupt would still
                 * fire when EE is enabled.
                 *
                 * To avoid taking the superfluous doorbell interrupt,
                 * execute a msgclr here before the interrupt is replayed.
                 */
                ppc_msgclr(PPC_DBELL_MSGTYPE);
        }

        /*
         * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
         * so this can be called unconditionally with the SRR1 wake
         * reason as returned by the idle code, which uses 0 to mean no
         * interrupt.
         *
         * If a future CPU were to designate this as an interrupt reason,
         * then a new index for no interrupt must be assigned.
         */
        local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
        /*
         * This must only be called with interrupts soft-disabled,
         * the replay will happen when re-enabling.
         */
        WARN_ON(!arch_irqs_disabled());

        /*
         * Interrupts must always be hard disabled before irq_happened is
         * modified (to prevent a lost update in case of an interrupt
         * between the load and the store).
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /* Indicate in the PACA that we have an interrupt to replay */
        local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */

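/*
 * These rows are appended to /proc/interrupts after the per-IRQ
 * counts. Example of one row (the counts are illustrative only):
 *
 *      LOC:    123456    654321   Local timer interrupts for timer event device
 */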
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
        if (tau_initialized) {
                seq_printf(p, "%*s: ", prec, "TAU");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", tau_interrupts(j));
                seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
        }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
        seq_printf(p, "  Local timer interrupts for timer event device\n");

        seq_printf(p, "%*s: ", prec, "BCT");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
        seq_printf(p, "  Broadcast timer interrupts for timer event device\n");

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
        seq_printf(p, "  Local timer interrupts for others\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
        seq_printf(p, "  Spurious interrupts\n");

        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
        seq_printf(p, "  Performance monitoring interrupts\n");

        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
        seq_printf(p, "  Machine check exceptions\n");

#ifdef CONFIG_PPC_BOOK3S_64
        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                seq_printf(p, "%*s: ", prec, "HMI");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs);
                seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
        }
#endif

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
        seq_printf(p, "  System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
        seq_printf(p, "%*s: ", prec, "WDG");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
        seq_printf(p, "  Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
        if (cpu_has_feature(CPU_FTR_DBELL)) {
                seq_printf(p, "%*s: ", prec, "DBL");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
                seq_printf(p, "  Doorbell interrupts\n");
        }
#endif

        return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

        sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;
        sum += per_cpu(irq_stat, cpu).spurious_irqs;
        sum += per_cpu(irq_stat, cpu).timer_irqs_others;
#ifdef CONFIG_PPC_BOOK3S_64
        sum += paca_ptrs[cpu]->hmi_irqs;
#endif
        sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
        sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
        sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

        return sum;
}

static inline void check_stack_overflow(void)
{
        long sp;

        if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
                return;

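        /* Offset of the stack pointer within its THREAD_SIZE-aligned stack. */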
        sp = current_stack_pointer & (THREAD_SIZE - 1);

        /* check for stack overflow: is there less than 2KB free? */
        if (unlikely(sp < 2048)) {
                pr_err("do_IRQ: stack overflow: %ld\n", sp);
                dump_stack();
        }
}

static __always_inline void call_do_softirq(const void *sp)
{
        /* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
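        /*
         * The store-with-update below saves the old r1 at the top of the
         * new stack, forming a valid back chain; the final load follows
         * that back chain to restore the original stack pointer.
         */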
        asm volatile (
                 PPC_STLU "     %%r1, %[offset](%[sp])  ;"
                "mr             %%r1, %[sp]             ;"
                "bl             %[callee]               ;"
                 PPC_LL "       %%r1, 0(%%r1)           ;"
                 : // Outputs
                 : // Inputs
                   [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
                   [callee] "i" (__do_softirq)
                 : // Clobbers
                   "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
                   "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
                   "r11", "r12"
        );
}

static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
{
        register unsigned long r3 asm("r3") = (unsigned long)regs;

        /* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
        asm volatile (
                 PPC_STLU "     %%r1, %[offset](%[sp])  ;"
                "mr             %%r1, %[sp]             ;"
                "bl             %[callee]               ;"
                 PPC_LL "       %%r1, 0(%%r1)           ;"
                 : // Outputs
                   "+r" (r3)
                 : // Inputs
                   [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
                   [callee] "i" (__do_irq)
                 : // Clobbers
                   "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
                   "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
                   "r11", "r12"
        );
}

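/*
 * Static call for the platform's interrupt fetch. It defaults to
 * returning 0 (no interrupt) until init_IRQ() patches in
 * ppc_md.get_irq below.
 */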
DEFINE_STATIC_CALL_RET0(ppc_get_irq, *ppc_md.get_irq);

void __do_irq(struct pt_regs *regs)
{
        unsigned int irq;

        trace_irq_entry(regs);

        /*
         * Query the platform PIC for the interrupt & ack it.
         *
         * This will typically lower the interrupt line to the CPU
         */
        irq = static_call(ppc_get_irq)();

        /* We can hard enable interrupts now to allow perf interrupts */
        if (should_hard_irq_enable())
                do_hard_irq_enable();

        /* And finally process it */
        if (unlikely(!irq))
                __this_cpu_inc(irq_stat.spurious_irqs);
        else
                generic_handle_irq(irq);

        trace_irq_exit(regs);
}

void __do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        void *cursp, *irqsp, *sirqsp;

        /* Switch to the irq stack to handle this */
        cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
        irqsp = hardirq_ctx[raw_smp_processor_id()];
        sirqsp = softirq_ctx[raw_smp_processor_id()];

        check_stack_overflow();

        /* Already there ? */
        if (unlikely(cursp == irqsp || cursp == sirqsp)) {
                __do_irq(regs);
                set_irq_regs(old_regs);
                return;
        }
        /* Switch stack and call */
        call_do_irq(regs, irqsp);

        set_irq_regs(old_regs);
}

DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
{
        __do_IRQ(regs);
}

static void *__init alloc_vm_stack(void)
{
        return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
                              NUMA_NO_NODE, (void *)_RET_IP_);
}
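
/*
 * vmalloc'd IRQ stacks are separated by guard pages, so an overflow
 * faults instead of silently corrupting adjacent memory.
 */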

static void __init vmap_irqstack_init(void)
{
        int i;

        for_each_possible_cpu(i) {
                softirq_ctx[i] = alloc_vm_stack();
                hardirq_ctx[i] = alloc_vm_stack();
        }
}


void __init init_IRQ(void)
{
        if (IS_ENABLED(CONFIG_VMAP_STACK))
                vmap_irqstack_init();

        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();

        if (!WARN_ON(!ppc_md.get_irq))
                static_call_update(ppc_get_irq, ppc_md.get_irq);
}

#ifdef CONFIG_BOOKE_OR_40x
void *critirq_ctx[NR_CPUS] __read_mostly;
void *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
#endif

void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;

void do_softirq_own_stack(void)
{
        call_do_softirq(softirq_ctx[smp_processor_id()]);
}

irq_hw_number_t virq_to_hw(unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);
        return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
        int cpuid;

        if (cpumask_equal(mask, cpu_online_mask)) {
                static int irq_rover;
                static DEFINE_RAW_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
do_round_robin:
                raw_spin_lock_irqsave(&irq_rover_lock, flags);

                irq_rover = cpumask_next(irq_rover, cpu_online_mask);
                if (irq_rover >= nr_cpu_ids)
                        irq_rover = cpumask_first(cpu_online_mask);

                cpuid = irq_rover;

                raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
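                /*
                 * Pick the first online CPU in the mask; if the mask
                 * contains no online CPUs, fall back to round-robin.
                 */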
                cpuid = cpumask_first_and(mask, cpu_online_mask);
                if (cpuid >= nr_cpu_ids)
                        goto do_round_robin;
        }

        return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
        return hard_smp_processor_id();
}
#endif

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */