/*
 * linux/arch/parisc/traps.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>

#include "../math-emu/math-emu.h" /* for handle_fpe() */

#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
			  /*  dumped to the console via printk)          */

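/*
 * Render the low 'nbits' bits of 'x' into 'buf' as an ASCII string of
 * '0'/'1' characters (most significant bit first) and NUL-terminate it.
 * show_regs() uses this to print the PSW under its bit-name header,
 * e.g. printbinary(buf, 0xa5, 8) yields "10100101".
 */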
static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}

#ifdef __LP64__
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif

static int kstack_depth_to_print = 24;
extern struct module *module_list;
extern struct module kernel_module;

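/*
 * Return nonzero if 'addr' looks like kernel code: either inside the
 * core kernel image (_stext.._etext) or, with CONFIG_MODULES, inside a
 * loaded module's region.  show_trace() uses this to filter stack words.
 */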
static inline int kernel_text_address(unsigned long addr)
{
#ifdef CONFIG_MODULES
	int retval = 0;
	struct module *mod;
#endif
	extern char _stext, _etext;

	if (addr >= (unsigned long) &_stext &&
	    addr <= (unsigned long) &_etext)
		return 1;

#ifdef CONFIG_MODULES
	for (mod = module_list; mod != &kernel_module; mod = mod->next) {
		/* mod_bound tests for addr being inside the vmalloc'ed
		 * module area. Of course it'd be better to test only
		 * for the .text subset... */
		if (mod_bound(addr, 0, mod)) {
			retval = 1;
			break;
		}
	}
	return retval;
#else
	/* Without modules there is nothing else that can be kernel text. */
	return 0;
#endif
}


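/*
 * Heuristic backtrace: walk downward from 'stack' to the base of the
 * THREAD_SIZE-aligned kernel stack and print every word that passes
 * kernel_text_address(), four addresses per line.  No frame decoding
 * is attempted, so some printed values may be stale saved pointers.
 */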
void show_trace(unsigned long * stack)
{
	unsigned long *startstack;
	unsigned long addr;
	int i;

	startstack = (unsigned long *)((unsigned long)stack & ~(THREAD_SIZE - 1));
	i = 1;
	printk("Kernel addresses on the stack:\n");
	while (stack >= startstack) {
		addr = *stack--;
		if (kernel_text_address(addr)) {
			printk(" [<" RFMT ">] ", addr);
			if ((i & 0x03) == 0)
				printk("\n");
			i++;
		}
	}
	printk("\n");
}

void show_trace_task(struct task_struct *tsk)
{
	show_trace((unsigned long *)tsk->thread.regs.ksp);
}

void show_stack(unsigned long * sp)
{
	unsigned long *stack;
	int i;

	/*
	 * debugging aid: "show_stack(NULL);" prints the
	 * back trace for this cpu.
	 */
	if (sp==NULL)
		sp = (unsigned long*)&sp;

	stack = sp;
	printk("\n" KERN_CRIT "Stack Dump:\n");
	printk(KERN_CRIT " " RFMT ": ", (unsigned long) stack);
	for (i=0; i < kstack_depth_to_print; i++) {
		if (((long) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i & 0x03) == 0))
			printk("\n" KERN_CRIT " " RFMT ": ",
				(unsigned long) stack);
		printk(RFMT " ", *stack--);
	}
	printk("\n" KERN_CRIT "\n");
	show_trace(sp);
}

/*
 * The architecture-independent backtrace generator
 */
void dump_stack(void)
{
	show_stack(0);
}


void show_regs(struct pt_regs *regs)
{
	int i;
	char buf[128], *p;
	char *level;
	unsigned long cr30;
	unsigned long cr31;

	level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;

	printk("%s\n", level); /* don't want to have that pretty register dump messed up */

	printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4) {
		int j;
		p = buf;
		p += sprintf(p, "%sr%02d-%02d ", level, i, i + 3);
		for (j = 0; j < 4; j++) {
			p += sprintf(p, " " RFMT, (i+j) == 0 ? 0 : regs->gr[i + j]);
		}
		printk("%s\n", buf);
	}

	for (i = 0; i < 8; i += 4) {
		int j;
		p = buf;
		p += sprintf(p, "%ssr%d-%d ", level, i, i + 3);
		for (j = 0; j < 4; j++) {
			p += sprintf(p, " " RFMT, regs->sr[i + j]);
		}
		printk("%s\n", buf);
	}

#if RIDICULOUSLY_VERBOSE
	for (i = 0; i < 32; i += 2)
		printk("%sFR%02d : %016lx FR%2d : %016lx", level, i,
			regs->fr[i], i+1, regs->fr[i+1]);
#endif

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
		level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
		level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
		level, ((struct task_struct *)cr30)->processor, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
}


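/*
 * For user-mode traps this only logs the event (when PRINT_USER_FAULTS
 * is defined and err != 0) and returns so a signal can be delivered;
 * for kernel-mode traps it restores a usable console, dumps the
 * registers and kills the current task.  The PARISC_KERNEL_DEATH flag
 * guards against recursing if the dump or do_exit() path faults again.
 */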
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
		if (err == 0)
			return; /* STFU */

		/* XXX for debugging only */
		printk(KERN_DEBUG "%s (pid %d): %s (code %ld)\n",
			current->comm, current->pid, str, err);
		show_regs(regs);
#endif
		return;
	}

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
		current->comm, current->pid, str, err);
	show_regs(regs);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
		sti();
		while (1);
	}

	current->thread.flags |= PARISC_KERNEL_DEATH;
	do_exit(SIGSEGV);
}

int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
{
	return syscall(regs);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
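/* 0x10004 == (8 << 13) | 4, i.e. the im13 (=8) and im5 (=4) immediates
 * of "break 4,8" as they show up in the IIR; handle_break() below
 * matches on this value. */
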
void handle_gdb_break(struct pt_regs *regs, int wot)
{
	struct siginfo si;

	si.si_code = wot;
	si.si_addr = (void *) (regs->iaoq[0] & ~3);
	si.si_signo = SIGTRAP;
	si.si_errno = 0;
	force_sig_info(SIGTRAP, &si, current);
}

void handle_break(unsigned iir, struct pt_regs *regs)
{
	struct siginfo si;

	switch(iir) {
	case 0x00:
#ifdef PRINT_USER_FAULTS
		printk(KERN_DEBUG "break 0,0: pid=%d command='%s'\n",
			current->pid, current->comm);
#endif
		die_if_kernel("Breakpoint", regs, 0);
#ifdef PRINT_USER_FAULTS
		show_regs(regs);
#endif
		si.si_code = TRAP_BRKPT;
		si.si_addr = (void *) (regs->iaoq[0] & ~3);
		si.si_signo = SIGTRAP;
		force_sig_info(SIGTRAP, &si, current);
		break;

	case GDB_BREAK_INSN:
		die_if_kernel("Breakpoint", regs, 0);
		handle_gdb_break(regs, TRAP_BRKPT);
		break;

	default:
#ifdef PRINT_USER_FAULTS
		printk(KERN_DEBUG "break %#08x: pid=%d command='%s'\n",
			iir, current->pid, current->comm);
		show_regs(regs);
#endif
		si.si_signo = SIGTRAP;
		si.si_code = TRAP_BRKPT;
		si.si_addr = (void *) (regs->iaoq[0] & ~3);
		force_sig_info(SIGTRAP, &si, current);
		return;
	}
}


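/*
 * TOC ("transfer of control") is the operator/firmware-initiated
 * attention event used to break into a hung machine; this stub only
 * logs that it happened.
 */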
int handle_toc(void)
{
	printk(KERN_CRIT "TOC call.\n");
	return 0;
}

static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) = default_trap;


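/*
 * After an HPMC, firmware has saved the interrupted context into PIM
 * (processor internal memory) rather than onto a kernel trap frame.
 * Copy that snapshot into 'regs' so the normal show_regs()/show_stack()
 * paths can be reused; the layout differs between PA1.1 (narrow) and
 * PA2.0 (wide) machines, selected via boot_cpu_data.cpu_type.
 */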
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
	register int i;
	extern unsigned int hpmc_pim_data[];
	struct pdc_hpmc_pim_11 *pim_narrow;
	struct pdc_hpmc_pim_20 *pim_wide;

	if (boot_cpu_data.cpu_type >= pcxu) {

		pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

		/*
		 * Note: The following code will probably generate a
		 * bunch of truncation error warnings from the compiler.
		 * Could be handled with an ifdef, but perhaps there
		 * is a better way.
		 */

		regs->gr[0] = pim_wide->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_wide->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_wide->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_wide->sr[i];

		regs->iasq[0] = pim_wide->cr[17];
		regs->iasq[1] = pim_wide->iasq_back;
		regs->iaoq[0] = pim_wide->cr[18];
		regs->iaoq[1] = pim_wide->iaoq_back;

		regs->sar = pim_wide->cr[11];
		regs->iir = pim_wide->cr[19];
		regs->isr = pim_wide->cr[20];
		regs->ior = pim_wide->cr[21];
	}
	else {
		pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

		regs->gr[0] = pim_narrow->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_narrow->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_narrow->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_narrow->sr[i];

		regs->iasq[0] = pim_narrow->cr[17];
		regs->iasq[1] = pim_narrow->iasq_back;
		regs->iaoq[0] = pim_narrow->cr[18];
		regs->iaoq[1] = pim_narrow->iaoq_back;

		regs->sar = pim_narrow->cr[11];
		regs->iir = pim_narrow->cr[19];
		regs->isr = pim_narrow->cr[20];
		regs->ior = pim_narrow->cr[21];
	}

	/*
	 * The following fields only have meaning if we came through
	 * another path. So just zero them here.
	 */

	regs->ksp = 0;
	regs->kpc = 0;
	regs->orig_r28 = 0;
}


/*
 * This routine handles various exception codes. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
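/*
 * Point of no return: external interrupts are masked (set_eiem(0) plus
 * __cli()) and terminate_lock serializes the dump so a second CPU
 * hitting a fatal trap cannot interleave its output.  For code 1 (HPMC)
 * the register state is first recovered from PIM via
 * transfer_pim_to_trap_frame().  The function never returns; it spins
 * after handing the soft power button back to the hardware.
 */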
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static spinlock_t terminate_lock = SPIN_LOCK_UNLOCKED;

	set_eiem(0);
	__cli();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();


	/* Not all switch paths will gutter the processor... */
	switch(code){

	case 1:
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;
	}

	show_stack((unsigned long *)regs->gr[30]);

	printk("\n");
	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
		msg, code, regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Gutter the processor... */
	for(;;)
		;
}


void handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	struct siginfo si;

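	/*
	 * 'code' is the hardware interruption number.  Each case either
	 * fixes things up and returns, delivers a signal, or falls out of
	 * the switch with fault_address/fault_space filled in so the
	 * common tail can hand the fault to do_page_fault().
	 */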
	switch(code) {

	case 1:
		/* High-priority machine check (HPMC) */
		pdc_console_restart(); /* switch back to pdc if HPMC */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case 2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case 3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;
		if (regs->iasq[0])
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case 5:
		/* Low-priority machine check */

		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_all_caches();
		cpu_lpmc(5, regs);
		return;

	case 6:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space = regs->iasq[0];
		break;

	case 8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si.si_code = ILL_ILLOPC;
		goto give_sigill;

	case 9:
		/* Break instruction trap */
		handle_break(regs->iir,regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si.si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

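			/*
			 * Bit 0x00200000 of the IIR distinguishes cr27
			 * from cr26 and the low five bits name the
			 * destination GR.  After emulating, advance the
			 * instruction queue past the trapping instruction
			 * (front element <- back element, back += 4).
			 */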
			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si.si_code = ILL_PRVREG;
		/* Fall thru */
	give_sigill:
		si.si_signo = SIGILL;
		si.si_errno = 0;
		si.si_addr = (void *) regs->iaoq[0];
		force_sig_info(SIGILL, &si, current);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		si.si_signo = SIGFPE;
		si.si_code = FPE_INTOVF;
		si.si_addr = (void *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps on condition */
		si.si_signo = SIGFPE;
		/* Set to zero, and let the userspace app figure it out from
		   the insn pointed to by si_addr */
		si.si_code = 0;
		si.si_addr = (void *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		/* Fall thru */
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		/* Fall thru */
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* TODO: Still need to add slow path emulation code here */
		/* TODO: Understand what is meant by the TODO listed
		   above this one. (Carlos) */
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		/* Fall Through */
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		/* fall thru */
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (regs->iasq[0])
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case 7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				up_read(&current->mm->mmap_sem);
				break; /* call do_page_fault() */
			}
			up_read(&current->mm->mmap_sem);
		}
		/* Fall Through */
	case 27:
		/* Data memory protection ID trap */
		die_if_kernel("Protection id trap", regs, code);
		si.si_code = SEGV_MAPERR;
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		if (code == 7)
			si.si_addr = (void *) regs->iaoq[0];
		else
			si.si_addr = (void *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
			printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
				current->pid, current->comm);
			show_regs(regs);
#endif
			/* SIGBUS, for lack of a better one. */
			si.si_signo = SIGBUS;
			si.si_code = BUS_OBJERR;
			si.si_errno = 0;
			si.si_addr = (void *) regs->ior;
			force_sig_info(SIGBUS, &si, current);
			return;
		}

		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

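	/*
	 * Fell out of the switch: we have a candidate page fault.  For a
	 * user-mode trap the faulting space must match sr[7] (the user's
	 * space register); anything else is a wild or kernel-space access
	 * and gets an immediate SIGSEGV instead of do_page_fault().
	 */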
	if (user_mode(regs)) {
		if (fault_space != regs->sr[7]) {
#ifdef PRINT_USER_FAULTS
			if (fault_space == 0)
				printk(KERN_DEBUG "User Fault on Kernel Space ");
			else
				printk(KERN_DEBUG "User Fault (long pointer) ");
			printk("pid=%d command='%s'\n", current->pid, current->comm);
			show_regs(regs);
#endif
			si.si_signo = SIGSEGV;
			si.si_errno = 0;
			si.si_code = SEGV_MAPERR;
			si.si_addr = (void *) regs->ior;
			force_sig_info(SIGSEGV, &si, current);
			return;
		}
	}
	else {

		/*
		 * The kernel should never fault on its own address space.
		 */

		if (fault_space == 0) {
			pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
			parisc_terminate("Kernel Fault", regs, code, fault_address);
			/** NOT REACHED **/
		}
	}

	local_irq_enable();
	do_page_fault(regs, code, fault_address);
}



int __init check_ivt(void *iva)
{
	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 length;
	extern void os_hpmc(void);
	extern void os_hpmc_end(void);

	if (strcmp((char *)iva, "cows can fly"))
		return -1;

	ivap = (u32 *)iva;

	for (i = 0; i < 8; i++)
		*ivap++ = 0;

	/* Compute Checksum for HPMC handler */

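	/*
	 * 'ivap' now points at words 8..15 of the IVA, which hold the
	 * HPMC handler descriptor consulted by firmware.  The handler
	 * length goes in slot 7; slot 5 receives the negated sum of the
	 * os_hpmc code words and the descriptor words, so (assuming
	 * slot 5 started out as zero) the whole region checksums to zero.
	 */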
	length = (u32)((unsigned long)os_hpmc_end - (unsigned long)os_hpmc);
	ivap[7] = length;

	hpmcp = (u32 *)os_hpmc;

	for (i=0; i<length/4; i++)
		check += *hpmcp++;

	for (i=0; i<8; i++)
		check += ivap[i];

	ivap[5] = -check;

	return 0;
}

#ifndef __LP64__
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;

void __init trap_init(void)
{
	void *iva;

	if (boot_cpu_data.cpu_type >= pcxu)
		iva = (void *) &fault_vector_20;
	else
#ifdef __LP64__
		panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
		iva = (void *) &fault_vector_11;
#endif

	if (check_ivt(iva))
		panic("IVT invalid");
}
