// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/parisc/traps.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kfence.h>

#include <asm/assembly.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <linux/kgdb.h>
#include <linux/kprobes.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

static void parisc_show_stack(struct task_struct *task,
	struct pt_regs *regs, const char *loglvl);

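/* Render the low "nbits" bits of "x" into "buf" as a '0'/'1' string and
 * return the number of characters written (excluding the terminating NUL). */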
static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

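/* Dump four consecutive registers per output line, labelled with their index range. */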
#define PRINTREGS(lvl,r,f,fmt,x)	\
	printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n",	\
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
		(r)[(x)+2], (r)[(x)+3])

static void print_gr(const char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

	printk("%s\n", level);
	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(const char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1)	\n\t"
		      "fldd 0(%1),%%fr0	\n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
	int i, user;
	const char *level;
	unsigned long cr30, cr31;

	user = user_mode(regs);
	level = user ? KERN_DEBUG : KERN_CRIT;

	show_regs_print_info(level);

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user)
		print_fr(level, regs);

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
		level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
		level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
		level, task_cpu(current), cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

	if (user) {
		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
	} else {
		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

		parisc_show_stack(current, regs, KERN_DEFAULT);
	}
}

static DEFINE_RATELIMIT_STATE(_hppa_rs,
	DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

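/* Rate-limited message plus register dump. Non-critical reports are also
 * gated by the show_unhandled_signals sysctl. */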
#define parisc_printk_ratelimited(critical, regs, fmt, ...)	{	\
	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) {	\
		printk(fmt, ##__VA_ARGS__);	\
		show_regs(regs);	\
	}	\
}


static void do_show_stack(struct unwind_frame_info *info, const char *loglvl)
{
	int i = 1;

	printk("%sBacktrace:\n", loglvl);
	while (i <= MAX_UNWIND_ENTRIES) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk("%s [<" RFMT ">] %pS\n",
				loglvl, info->ip, (void *) info->ip);
			i++;
		}
	}
	printk("%s\n", loglvl);
}

static void parisc_show_stack(struct task_struct *task,
	struct pt_regs *regs, const char *loglvl)
{
	struct unwind_frame_info info;

	unwind_frame_init_task(&info, task, regs);

	do_show_stack(&info, loglvl);
}

void show_stack(struct task_struct *t, unsigned long *sp, const char *loglvl)
{
	parisc_show_stack(t, NULL, loglvl);
}

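/* Any kernel text address may hold a BUG()/WARN() break, so report every
 * address as a valid BUG address. */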
int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}

void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		parisc_printk_ratelimited(1, regs,
			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);

		return;
	}

	bust_spinlocks(1);

	oops_enter();

	/* Amuse the user in a SPARC fashion */
	if (err) printk(KERN_CRIT
			"      _______________________________ \n"
			"     < Your System ate a SPARC! Gah! >\n"
			"      ------------------------------- \n"
			"             \\   ^__^\n"
			"              (__)\\       )\\/\\\n"
			"               U  ||----w |\n"
			"                  ||     ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, task_pid_nr(current), str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	make_task_dead(SIGSEGV);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
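/* Deliver SIGTRAP for a gdb breakpoint or single-step. The two low bits of
 * IAOQ[0] hold the privilege level, so mask them off to report the plain
 * instruction address. */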
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	force_sig_fault(SIGTRAP, wot,
			(void __user *) (regs->iaoq[0] & ~3));
}

static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here. */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON(). */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

#ifdef CONFIG_KPROBES
	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN)) {
		parisc_kprobe_break_handler(regs);
		return;
	}
	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2)) {
		parisc_kprobe_ss_handler(regs);
		return;
	}
#endif

#ifdef CONFIG_KGDB
	if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
		iir == PARISC_KGDB_BREAK_INSN)) {
		kgdb_handle_exception(9, SIGTRAP, 0, regs);
		return;
	}
#endif

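	/* Anything else: unless it is gdb's own "break 4,8", log the two break
	 * immediates encoded in the IIR, then deliver the standard gdb SIGTRAP. */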
	if (unlikely(iir != GDB_BREAK_INSN))
		parisc_printk_ratelimited(0, regs,
			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			task_pid_nr(current), current->comm);

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;


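/* Rebuild a trap frame from the PIM data that firmware saved at HPMC time.
 * The wide (pdc_hpmc_pim_20) or narrow (pdc_hpmc_pim_11) layout is selected
 * by CPU type, so the normal register-dump paths can be reused. */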
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
	register int i;
	extern unsigned int hpmc_pim_data[];
	struct pdc_hpmc_pim_11 *pim_narrow;
	struct pdc_hpmc_pim_20 *pim_wide;

	if (boot_cpu_data.cpu_type >= pcxu) {

		pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

		/*
		 * Note: The following code will probably generate a
		 * bunch of truncation error warnings from the compiler.
		 * Could be handled with an ifdef, but perhaps there
		 * is a better way.
		 */

		regs->gr[0] = pim_wide->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_wide->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_wide->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_wide->sr[i];

		regs->iasq[0] = pim_wide->cr[17];
		regs->iasq[1] = pim_wide->iasq_back;
		regs->iaoq[0] = pim_wide->cr[18];
		regs->iaoq[1] = pim_wide->iaoq_back;

		regs->sar = pim_wide->cr[11];
		regs->iir = pim_wide->cr[19];
		regs->isr = pim_wide->cr[20];
		regs->ior = pim_wide->cr[21];
	}
	else {
		pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

		regs->gr[0] = pim_narrow->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_narrow->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_narrow->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_narrow->sr[i];

		regs->iasq[0] = pim_narrow->cr[17];
		regs->iasq[1] = pim_narrow->iasq_back;
		regs->iaoq[0] = pim_narrow->cr[18];
		regs->iaoq[1] = pim_narrow->iaoq_back;

		regs->sar = pim_narrow->cr[11];
		regs->iir = pim_narrow->cr[19];
		regs->isr = pim_narrow->cr[20];
		regs->ior = pim_narrow->cr[21];
	}

	/*
	 * The following fields only have meaning if we came through
	 * another path. So just zero them here.
	 */

	regs->ksp = 0;
	regs->kpc = 0;
	regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	(void)notify_die(DIE_OOPS, msg, regs, 0, code, SIGTRAP);
	bust_spinlocks(1);

	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch(code){

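	/* For an HPMC (code 1) the register state was captured by firmware;
	 * recover it from the PIM area so it can be dumped below. */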
	case 1:
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info, KERN_CRIT);
	}

	printk("\n");
	pr_crit("%s: Code=%d (%s) at addr " RFMT "\n",
		msg, code, trap_name(code), offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}

void notrace handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	int si_code;

	if (code == 1)
		pdc_console_restart();  /* switch back to pdc if HPMC */
	else if (!irqs_disabled_flags(regs->gr[0]))
		local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the users
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | 3;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch(code) {

	case 1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case 2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case 3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;

#ifdef CONFIG_KGDB
		if (kgdb_single_step) {
			kgdb_handle_exception(0, SIGTRAP, 0, regs);
			return;
		}
#endif

		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case 5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case PARISC_ITLB_TRAP:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space = regs->iasq[0];
		break;

	case 8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si_code = ILL_ILLOPC;
		goto give_sigill;

	case 9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

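			/* Advance the instruction address queue past the emulated mfctl. */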
			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si_code = ILL_PRVREG;
give_sigill:
		force_sig_fault(SIGILL, si_code,
				(void __user *) regs->iaoq[0]);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		force_sig_fault(SIGFPE, FPE_INTOVF,
				(void __user *) regs->iaoq[0]);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition */
		if(user_mode(regs)){
			/* Let userspace app figure it out from the insn pointed
			 * to by si_addr.
			 */
			force_sig_fault(SIGFPE, FPE_CONDTRAP,
					(void __user *) regs->iaoq[0]);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		__inc_irq_stat(irq_fpassist_count);
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		fallthrough;
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		fallthrough;
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
			 Still need to add slow path emulation code here!
			 If the insn used a non-shadow register, then the tlb
			 handlers could not have their side-effect (e.g. probe
			 writing to a target register) emulated since rfir would
			 erase the changes to said register. Instead we have to
			 setup everything, call this function we are in, and emulate
			 by hand. Technically we need to emulate:
			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		if (code == 17 && handle_nadtlb_fault(regs))
			return;
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		fallthrough;
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		fallthrough;
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case 7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			mmap_read_lock(current->mm);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				mmap_read_unlock(current->mm);
				break; /* call do_page_fault() */
			}
			mmap_read_unlock(current->mm);
		}
		/* CPU could not fetch instruction, so clear stale IIR value. */
		regs->iir = 0xbaadf00d;
		fallthrough;
	case 27:
		/* Data memory protection ID trap */
		if (code == 27 && !user_mode(regs) &&
			fixup_exception(regs))
			return;

		die_if_kernel("Protection id trap", regs, code);
		force_sig_fault(SIGSEGV, SEGV_MAPERR,
				(code == 7)?
				((void __user *) regs->iaoq[0]) :
				((void __user *) regs->ior));
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"handle_interruption() pid=%d command='%s'\n",
				task_pid_nr(current), current->comm);
			/* SIGBUS, for lack of a better one. */
			force_sig_fault(SIGBUS, BUS_OBJERR,
					(void __user *)regs->ior);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	if (user_mode(regs)) {
		if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
				code, fault_space,
				task_pid_nr(current), current->comm);
			force_sig_fault(SIGSEGV, SEGV_MAPERR,
					(void __user *)regs->ior);
			return;
		}
	}
	else {

		/*
		 * The kernel should never fault on its own address space,
		 * unless pagefault_disable() was called before.
		 */

		if (faulthandler_disabled() || fault_space == 0)
		{
			/* Clean up and return if in exception table. */
			if (fixup_exception(regs))
				return;
			/* Clean up and return if handled by kfence. */
			if (kfence_handle_page_fault(fault_address,
				parisc_acctyp(code, regs->iir) == VM_WRITE, regs))
				return;
			pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
			parisc_terminate("Kernel Fault", regs, code, fault_address);
		}
	}

	do_page_fault(regs, code, fault_address);
}


void __init initialize_ivt(const void *iva)
{
	extern const u32 os_hpmc[];

	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 instr;

	if (strcmp((const char *)iva, "cows can fly"))
		panic("IVT invalid");

	ivap = (u32 *)iva;

	for (i = 0; i < 8; i++)
		*ivap++ = 0;

	/*
	 * Use PDC_INSTR firmware function to get instruction that invokes
	 * PDCE_CHECK in HPMC handler. See programming note at page 1-31 of
	 * the PA 1.1 Firmware Architecture document.
	 */
	if (pdc_instr(&instr) == PDC_OK)
		ivap[0] = instr;

	/*
	 * Rules for the checksum of the HPMC handler:
	 * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed
	 *    its own IVA).
	 * 2. The word at IVA + 32 is nonzero.
	 * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
	 *    Address (IVA + 56) are word-aligned.
	 * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
	 *    the Length/4 words starting at Address is zero.
	 */

	/* Setup IVA and compute checksum for HPMC handler */
	ivap[6] = (u32)__pa(os_hpmc);

	hpmcp = (u32 *)os_hpmc;

	for (i=0; i<8; i++)
		check += ivap[i];

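	/* Store the checksum adjustment in word 5 of the HPMC vector (rule 4 above). */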
	ivap[5] = -check;
	pr_debug("initialize_ivt: IVA[6] = 0x%08x\n", ivap[6]);
}


/* early_trap_init() is called before we set up kernel mappings and
 * write-protect the kernel */
void __init early_trap_init(void)
{
	extern const void fault_vector_20;

#ifndef CONFIG_64BIT
	extern const void fault_vector_11;
	initialize_ivt(&fault_vector_11);
#endif

	initialize_ivt(&fault_vector_20);
}