1 /*
2 * arch/alpha/kernel/traps.c
3 *
4 * (C) Copyright 1994 Linus Torvalds
5 */
6
7 /*
8 * This file initializes the trap entry points
9 */
10
11 #include <linux/config.h>
12 #include <linux/mm.h>
13 #include <linux/sched.h>
14 #include <linux/tty.h>
15 #include <linux/delay.h>
16 #include <linux/smp_lock.h>
17
18 #include <asm/gentrap.h>
19 #include <asm/uaccess.h>
20 #include <asm/unaligned.h>
21 #include <asm/sysinfo.h>
22 #include <asm/hwrpb.h>
23 #include <asm/mmu_context.h>
24
25 #include "proto.h"
26
27 /* data/code implementing a work-around for some SRMs which
28 mishandle opDEC faults
29 */
30 static int opDEC_testing = 0;
31 static int opDEC_fix = 0;
32 static int opDEC_checked = 0;
33 static unsigned long opDEC_test_pc = 0;
34
35 static void
36 opDEC_check(void)
37 {
38 unsigned long test_pc;
39
40 if (opDEC_checked) return;
41
42 lock_kernel();
43 opDEC_testing = 1;
44
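/* How the test below works: "br %0,1f" loads the address of label 1
   (the addq) into test_pc, "addq %0,8,%0" advances it by two
   instructions so that it points at the cvttq itself, and the stq
   records that address in opDEC_test_pc.  The cvttq/svm on $f31 then
   raises an opDEC fault.  A correct SRM reports the PC of the
   instruction *after* the fault; a buggy one reports the faulting PC
   itself, which equals opDEC_test_pc, and do_entIF keys the +4 fixup
   off that comparison. */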
45 __asm__ __volatile__(
46 " br %0,1f\n"
47 "1: addq %0,8,%0\n"
48 " stq %0,%1\n"
49 " cvttq/svm $f31,$f31\n"
50 : "=&r"(test_pc), "=m"(opDEC_test_pc)
51 : );
52
53 opDEC_testing = 0;
54 opDEC_checked = 1;
55 unlock_kernel();
56 }
57
58 void
59 dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
60 {
61 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
62 regs->pc, regs->r26, regs->ps, print_tainted());
63 printk("v0 = %016lx t0 = %016lx t1 = %016lx\n",
64 regs->r0, regs->r1, regs->r2);
65 printk("t2 = %016lx t3 = %016lx t4 = %016lx\n",
66 regs->r3, regs->r4, regs->r5);
67 printk("t5 = %016lx t6 = %016lx t7 = %016lx\n",
68 regs->r6, regs->r7, regs->r8);
69
70 if (r9_15) {
71 printk("s0 = %016lx s1 = %016lx s2 = %016lx\n",
72 r9_15[9], r9_15[10], r9_15[11]);
73 printk("s3 = %016lx s4 = %016lx s5 = %016lx\n",
74 r9_15[12], r9_15[13], r9_15[14]);
75 printk("s6 = %016lx\n", r9_15[15]);
76 }
77
78 printk("a0 = %016lx a1 = %016lx a2 = %016lx\n",
79 regs->r16, regs->r17, regs->r18);
80 printk("a3 = %016lx a4 = %016lx a5 = %016lx\n",
81 regs->r19, regs->r20, regs->r21);
82 printk("t8 = %016lx t9 = %016lx t10= %016lx\n",
83 regs->r22, regs->r23, regs->r24);
84 printk("t11= %016lx pv = %016lx at = %016lx\n",
85 regs->r25, regs->r27, regs->r28);
86 printk("gp = %016lx sp = %p\n", regs->gp, regs+1);
87 #if 0
88 __halt();
89 #endif
90 }
91
92 #if 0
93 static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
94 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
95 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
96 "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
97 #endif
98
99 static void
100 dik_show_code(unsigned int *pc)
101 {
102 long i;
103
104 printk("Code:");
105 for (i = -6; i < 2; i++) {
106 unsigned int insn;
107 if (__get_user(insn, pc+i))
108 break;
109 printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
110 }
111 printk("\n");
112 }
113
114 static void
115 dik_show_trace(unsigned long *sp)
116 {
117 long i = 0;
118 printk("Trace:");
119 while (0x1ff8 & (unsigned long) sp) {
120 extern unsigned long _stext, _etext;
121 unsigned long tmp = *sp;
122 sp++;
123 if (tmp < (unsigned long) &_stext)
124 continue;
125 if (tmp >= (unsigned long) &_etext)
126 continue;
127 printk("%lx%c", tmp, ' ');
128 if (++i > 40) {
129 printk(" ...");
130 break;
131 }
132 }
133 printk("\n");
134 }
135
136 void show_trace_task(struct task_struct * tsk)
137 {
138 struct thread_struct * thread = &tsk->thread;
139 unsigned long fp, sp = thread->ksp, base = (unsigned long) thread;
140
141 if (sp > base && sp+6*8 < base + 16*1024) {
142 fp = ((unsigned long*)sp)[6];
143 if (fp > sp && fp < base + 16*1024)
144 dik_show_trace((unsigned long *)fp);
145 }
146 }
147
148 int kstack_depth_to_print = 24;
149
150 void show_stack(unsigned long *sp)
151 {
152 unsigned long *stack;
153 int i;
154
155 /*
156 * debugging aid: "show_stack(NULL);" prints the
157 * back trace for this cpu.
158 */
159 if(sp==NULL)
160 sp=(unsigned long*)&sp;
161
162 stack = sp;
163 for(i=0; i < kstack_depth_to_print; i++) {
164 if (((long) stack & (THREAD_SIZE-1)) == 0)
165 break;
166 if (i && ((i % 4) == 0))
167 printk("\n ");
168 printk("%016lx ", *stack++);
169 }
170 printk("\n");
171 dik_show_trace(sp);
172 }
173
174 void dump_stack(void)
175 {
176 show_stack(NULL);
177 }
178
179 void
180 die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
181 {
182 if (regs->ps & 8)
183 return;
184 #ifdef CONFIG_SMP
185 printk("CPU %d ", hard_smp_processor_id());
186 #endif
187 printk("%s(%d): %s %ld\n", current->comm, current->pid, str, err);
188 dik_show_regs(regs, r9_15);
189 dik_show_trace((unsigned long *)(regs+1));
190 dik_show_code((unsigned int *)regs->pc);
191
192 if (current->thread.flags & (1UL << 63)) {
193 printk("die_if_kernel recursion detected.\n");
194 sti();
195 while (1);
196 }
197 current->thread.flags |= (1UL << 63);
198 do_exit(SIGSEGV);
199 }
200
201 #ifndef CONFIG_MATHEMU
202 static long dummy_emul(void) { return 0; }
203 long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
204 = (void *)dummy_emul;
205 long (*alpha_fp_emul) (unsigned long pc)
206 = (void *)dummy_emul;
207 #else
208 long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
209 long alpha_fp_emul (unsigned long pc);
210 #endif
211
212 asmlinkage void
213 do_entArith(unsigned long summary, unsigned long write_mask,
214 unsigned long a2, unsigned long a3, unsigned long a4,
215 unsigned long a5, struct pt_regs regs)
216 {
217 if (summary & 1) {
218 /* Software-completion summary bit is set, so try to
219 emulate the instruction. */
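/* (The reported PC is that of the instruction following the trap, so
   on a machine with precise exceptions the trapping FP instruction
   itself sits at regs.pc - 4.) */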
220 if (!amask(AMASK_PRECISE_TRAP)) {
221 /* 21264 (except pass 1) has precise exceptions. */
222 if (alpha_fp_emul(regs.pc - 4))
223 return;
224 } else {
225 if (alpha_fp_emul_imprecise(&regs, write_mask))
226 return;
227 }
228 }
229
230 #if 0
231 printk("%s: arithmetic trap at %016lx: %02lx %016lx\n",
232 current->comm, regs.pc, summary, write_mask);
233 #endif
234 die_if_kernel("Arithmetic fault", &regs, 0, 0);
235 send_sig(SIGFPE, current, 1);
236 }
237
238 asmlinkage void
239 do_entIF(unsigned long type, unsigned long a1,
240 unsigned long a2, unsigned long a3, unsigned long a4,
241 unsigned long a5, struct pt_regs regs)
242 {
243 if (!opDEC_testing || type != 4) {
244 die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
245 &regs, type, 0);
246 }
247
248 switch (type) {
249 case 0: /* breakpoint */
250 if (ptrace_cancel_bpt(current)) {
251 regs.pc -= 4; /* make pc point to former bpt */
252 }
253 send_sig(SIGTRAP, current, 1);
254 return;
255
256 case 1: /* bugcheck */
257 send_sig(SIGTRAP, current, 1);
258 return;
259
260 case 2: /* gentrap */
261 /*
262 * The exception code should be passed on to the signal
263 * handler as the second argument. Linux doesn't do that
264 * yet (also notice that Linux *always* behaves like
265 * DEC Unix with SA_SIGINFO off; see DEC Unix man page
266 * for sigaction(2)).
267 */
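/* The gentrap code is passed by the caller in a0 (r16), which is why
   we switch on regs.r16; the GEN_* values come from <asm/gentrap.h>. */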
268 switch ((long) regs.r16) {
269 case GEN_INTOVF: case GEN_INTDIV: case GEN_FLTOVF:
270 case GEN_FLTDIV: case GEN_FLTUND: case GEN_FLTINV:
271 case GEN_FLTINE: case GEN_ROPRAND:
272 send_sig(SIGFPE, current, 1);
273 return;
274
275 case GEN_DECOVF:
276 case GEN_DECDIV:
277 case GEN_DECINV:
278 case GEN_ASSERTERR:
279 case GEN_NULPTRERR:
280 case GEN_STKOVF:
281 case GEN_STRLENERR:
282 case GEN_SUBSTRERR:
283 case GEN_RANGERR:
284 case GEN_SUBRNG:
285 case GEN_SUBRNG1:
286 case GEN_SUBRNG2:
287 case GEN_SUBRNG3:
288 case GEN_SUBRNG4:
289 case GEN_SUBRNG5:
290 case GEN_SUBRNG6:
291 case GEN_SUBRNG7:
292 send_sig(SIGTRAP, current, 1);
293 return;
294 }
295 break;
296
297 case 4: /* opDEC */
298 if (implver() == IMPLVER_EV4) {
299 /* Some versions of SRM do not handle
300 the opDEC properly - they return the PC of the
301 opDEC fault, not the instruction after as the
302 Alpha architecture requires. Here we fix it up.
303 We do this by intentionally causing an opDEC
304 fault during the boot sequence and testing if
305 we get the correct PC. If not, we set a flag
306 to correct it every time through.
307 */
308 if (opDEC_testing) {
309 if (regs.pc == opDEC_test_pc) {
310 opDEC_fix = 4;
311 regs.pc += 4;
312 printk("opDEC fixup enabled.\n");
313 }
314 return;
315 }
316 regs.pc += opDEC_fix;
317
318 /* EV4 does not implement anything except normal
319 rounding. Everything else will come here as
320 an illegal instruction. Emulate them. */
321 if (alpha_fp_emul(regs.pc-4))
322 return;
323 }
324 break;
325
326 case 3: /* FEN fault */
327 /* Irritating users can call PAL_clrfen to disable the
328 FPU for the process. The kernel will then trap in
329 do_switch_stack and undo_switch_stack when we try
330 to save and restore the FP registers.
331
332 Given that GCC by default generates code that uses the
333 FP registers, PAL_clrfen is not useful except for DoS
334 attacks. So turn the bleeding FPU back on and be done
335 with it. */
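/* (pal_flags bit 0 maps to the FEN bit of the hardware PCB;
   __reload_thread() hands the updated PCB back to PALcode, which
   turns the FPU back on for this thread.) */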
336 current->thread.pal_flags |= 1;
337 __reload_thread(&current->thread);
338 return;
339
340 case 5: /* illoc */
341 default: /* unexpected instruction-fault type */
342 ;
343 }
344 send_sig(SIGILL, current, 1);
345 }
346
347 /* There is an ifdef in the PALcode in MILO that enables a
348 "kernel debugging entry point" as an unprivileged call_pal.
349
350 We don't want to have anything to do with it, but unfortunately
351 several versions of MILO included in distributions have it enabled,
352 and if we don't put something on the entry point we'll oops. */
353
354 asmlinkage void
355 do_entDbg(unsigned long type, unsigned long a1,
356 unsigned long a2, unsigned long a3, unsigned long a4,
357 unsigned long a5, struct pt_regs regs)
358 {
359 die_if_kernel("Instruction fault", &regs, type, 0);
360 force_sig(SIGILL, current);
361 }
362
363
364 /*
365 * entUna uses a different register layout in order to stay reasonably simple. It
366 * needs access to all the integer registers (the kernel doesn't use
367 * fp-regs), and it needs to have them in order for simpler access.
368 *
369 * Due to the non-standard register layout (and because we don't want
370 * to handle floating-point regs), user-mode unaligned accesses are
371 * handled separately by do_entUnaUser below.
372 *
373 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
374 * on a gp-register unaligned load/store, something is _very_ wrong
375 * in the kernel anyway..
376 */
377 struct allregs {
378 unsigned long regs[32];
379 unsigned long ps, pc, gp, a0, a1, a2;
380 };
381
382 struct unaligned_stat {
383 unsigned long count, va, pc;
384 } unaligned[2];
385
386
387 /* Macro for exception fixup code to access integer registers. */
388 #define una_reg(r) (regs.regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
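/* struct allregs has 32 regs[] slots followed by ps, pc, gp, a0, a1, a2,
   so (relying on the fields being laid out contiguously) index 35 lands
   on a0, 36 on a1 and 37 on a2.  The entUna entry code stashes the user's
   a0-a2 (r16-r18) in those trailing slots -- the registers themselves are
   clobbered to pass va/opcode/reg to this handler -- which is what the
   (r)+19 remapping accounts for. */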
389
390
391 asmlinkage void
392 do_entUna(void * va, unsigned long opcode, unsigned long reg,
393 unsigned long a3, unsigned long a4, unsigned long a5,
394 struct allregs regs)
395 {
396 long error, tmp1, tmp2, tmp3, tmp4;
397 unsigned long pc = regs.pc - 4;
398 unsigned fixup;
399
400 unaligned[0].count++;
401 unaligned[0].va = (unsigned long) va;
402 unaligned[0].pc = pc;
403
404 /* We don't want to use the generic get/put unaligned macros as
405 we want to trap exceptions. Only if we actually get an
406 exception will we decide whether we should have caught it. */
407
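/* Roughly how the fixup entries below work (the same convention is used
   by every exception block in this file): each ".gprel32 Nb" records the
   gp-relative address of a load/store that may fault, and the "lda" that
   follows is never executed -- it merely encodes the fixup.  Its 16-bit
   displacement (e.g. "3b-1b") is the offset from the faulting instruction
   to the continuation label, its base register names the variable that
   receives a nonzero error code, and its target register is zeroed ($31
   meaning "nothing to zero").  got_exception below uses the same machinery
   (search_exception_table()/fixup_exception()) to forward the fault to the
   original caller's fixup. */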
408 switch (opcode) {
409 case 0x0c: /* ldwu */
410 __asm__ __volatile__(
411 "1: ldq_u %1,0(%3)\n"
412 "2: ldq_u %2,1(%3)\n"
413 " extwl %1,%3,%1\n"
414 " extwh %2,%3,%2\n"
415 "3:\n"
416 ".section __ex_table,\"a\"\n"
417 " .gprel32 1b\n"
418 " lda %1,3b-1b(%0)\n"
419 " .gprel32 2b\n"
420 " lda %2,3b-2b(%0)\n"
421 ".previous"
422 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
423 : "r"(va), "0"(0));
424 if (error)
425 goto got_exception;
426 una_reg(reg) = tmp1|tmp2;
427 return;
428
429 case 0x28: /* ldl */
430 __asm__ __volatile__(
431 "1: ldq_u %1,0(%3)\n"
432 "2: ldq_u %2,3(%3)\n"
433 " extll %1,%3,%1\n"
434 " extlh %2,%3,%2\n"
435 "3:\n"
436 ".section __ex_table,\"a\"\n"
437 " .gprel32 1b\n"
438 " lda %1,3b-1b(%0)\n"
439 " .gprel32 2b\n"
440 " lda %2,3b-2b(%0)\n"
441 ".previous"
442 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
443 : "r"(va), "0"(0));
444 if (error)
445 goto got_exception;
446 una_reg(reg) = (int)(tmp1|tmp2);
447 return;
448
449 case 0x29: /* ldq */
450 __asm__ __volatile__(
451 "1: ldq_u %1,0(%3)\n"
452 "2: ldq_u %2,7(%3)\n"
453 " extql %1,%3,%1\n"
454 " extqh %2,%3,%2\n"
455 "3:\n"
456 ".section __ex_table,\"a\"\n"
457 " .gprel32 1b\n"
458 " lda %1,3b-1b(%0)\n"
459 " .gprel32 2b\n"
460 " lda %2,3b-2b(%0)\n"
461 ".previous"
462 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
463 : "r"(va), "0"(0));
464 if (error)
465 goto got_exception;
466 una_reg(reg) = tmp1|tmp2;
467 return;
468
469 /* Note that the store sequences do not indicate that they change
470 memory because it _should_ be affecting nothing in this context.
471 (Otherwise we have other, much larger, problems.) */
472 case 0x0d: /* stw */
473 __asm__ __volatile__(
474 "1: ldq_u %2,1(%5)\n"
475 "2: ldq_u %1,0(%5)\n"
476 " inswh %6,%5,%4\n"
477 " inswl %6,%5,%3\n"
478 " mskwh %2,%5,%2\n"
479 " mskwl %1,%5,%1\n"
480 " or %2,%4,%2\n"
481 " or %1,%3,%1\n"
482 "3: stq_u %2,1(%5)\n"
483 "4: stq_u %1,0(%5)\n"
484 "5:\n"
485 ".section __ex_table,\"a\"\n"
486 " .gprel32 1b\n"
487 " lda %2,5b-1b(%0)\n"
488 " .gprel32 2b\n"
489 " lda %1,5b-2b(%0)\n"
490 " .gprel32 3b\n"
491 " lda $31,5b-3b(%0)\n"
492 " .gprel32 4b\n"
493 " lda $31,5b-4b(%0)\n"
494 ".previous"
495 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
496 "=&r"(tmp3), "=&r"(tmp4)
497 : "r"(va), "r"(una_reg(reg)), "0"(0));
498 if (error)
499 goto got_exception;
500 return;
501
502 case 0x2c: /* stl */
503 __asm__ __volatile__(
504 "1: ldq_u %2,3(%5)\n"
505 "2: ldq_u %1,0(%5)\n"
506 " inslh %6,%5,%4\n"
507 " insll %6,%5,%3\n"
508 " msklh %2,%5,%2\n"
509 " mskll %1,%5,%1\n"
510 " or %2,%4,%2\n"
511 " or %1,%3,%1\n"
512 "3: stq_u %2,3(%5)\n"
513 "4: stq_u %1,0(%5)\n"
514 "5:\n"
515 ".section __ex_table,\"a\"\n"
516 " .gprel32 1b\n"
517 " lda %2,5b-1b(%0)\n"
518 " .gprel32 2b\n"
519 " lda %1,5b-2b(%0)\n"
520 " .gprel32 3b\n"
521 " lda $31,5b-3b(%0)\n"
522 " .gprel32 4b\n"
523 " lda $31,5b-4b(%0)\n"
524 ".previous"
525 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
526 "=&r"(tmp3), "=&r"(tmp4)
527 : "r"(va), "r"(una_reg(reg)), "0"(0));
528 if (error)
529 goto got_exception;
530 return;
531
532 case 0x2d: /* stq */
533 __asm__ __volatile__(
534 "1: ldq_u %2,7(%5)\n"
535 "2: ldq_u %1,0(%5)\n"
536 " insqh %6,%5,%4\n"
537 " insql %6,%5,%3\n"
538 " mskqh %2,%5,%2\n"
539 " mskql %1,%5,%1\n"
540 " or %2,%4,%2\n"
541 " or %1,%3,%1\n"
542 "3: stq_u %2,7(%5)\n"
543 "4: stq_u %1,0(%5)\n"
544 "5:\n"
545 ".section __ex_table,\"a\"\n\t"
546 " .gprel32 1b\n"
547 " lda %2,5b-1b(%0)\n"
548 " .gprel32 2b\n"
549 " lda %1,5b-2b(%0)\n"
550 " .gprel32 3b\n"
551 " lda $31,5b-3b(%0)\n"
552 " .gprel32 4b\n"
553 " lda $31,5b-4b(%0)\n"
554 ".previous"
555 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
556 "=&r"(tmp3), "=&r"(tmp4)
557 : "r"(va), "r"(una_reg(reg)), "0"(0));
558 if (error)
559 goto got_exception;
560 return;
561 }
562
563 lock_kernel();
564 printk("Bad unaligned kernel access at %016lx: %p %lx %ld\n",
565 pc, va, opcode, reg);
566 do_exit(SIGSEGV);
567
568 got_exception:
569 /* Ok, we caught the exception, but we don't want it. Is there
570 someone to pass it along to? */
571 if ((fixup = search_exception_table(pc, regs.gp)) != 0) {
572 unsigned long newpc;
573 newpc = fixup_exception(una_reg, fixup, pc);
574
575 printk("Forwarding unaligned exception at %lx (%lx)\n",
576 pc, newpc);
577
578 (&regs)->pc = newpc;
579 return;
580 }
581
582 /*
583 * Yikes! No one to forward the exception to.
584 * Since the registers are in a weird format, dump them ourselves.
585 */
586 lock_kernel();
587
588 printk("%s(%d): unhandled unaligned exception\n",
589 current->comm, current->pid);
590
591 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
592 pc, una_reg(26), regs.ps);
593 printk("r0 = %016lx r1 = %016lx r2 = %016lx\n",
594 una_reg(0), una_reg(1), una_reg(2));
595 printk("r3 = %016lx r4 = %016lx r5 = %016lx\n",
596 una_reg(3), una_reg(4), una_reg(5));
597 printk("r6 = %016lx r7 = %016lx r8 = %016lx\n",
598 una_reg(6), una_reg(7), una_reg(8));
599 printk("r9 = %016lx r10= %016lx r11= %016lx\n",
600 una_reg(9), una_reg(10), una_reg(11));
601 printk("r12= %016lx r13= %016lx r14= %016lx\n",
602 una_reg(12), una_reg(13), una_reg(14));
603 printk("r15= %016lx\n", una_reg(15));
604 printk("r16= %016lx r17= %016lx r18= %016lx\n",
605 una_reg(16), una_reg(17), una_reg(18));
606 printk("r19= %016lx r20= %016lx r21= %016lx\n",
607 una_reg(19), una_reg(20), una_reg(21));
608 printk("r22= %016lx r23= %016lx r24= %016lx\n",
609 una_reg(22), una_reg(23), una_reg(24));
610 printk("r25= %016lx r27= %016lx r28= %016lx\n",
611 una_reg(25), una_reg(27), una_reg(28));
612 printk("gp = %016lx sp = %p\n", regs.gp, &regs+1);
613
614 dik_show_code((unsigned int *)pc);
615 dik_show_trace((unsigned long *)(&regs+1));
616
617 if (current->thread.flags & (1UL << 63)) {
618 printk("die_if_kernel recursion detected.\n");
619 sti();
620 while (1);
621 }
622 current->thread.flags |= (1UL << 63);
623 do_exit(SIGSEGV);
624 }
625
626 /*
627 * Convert an s-floating point value in memory format to the
628 * corresponding value in register format. The exponent
629 * needs to be remapped to preserve non-finite values
630 * (infinities, not-a-numbers, denormals).
631 */
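/*
 * Concretely, the 8-bit S-format exponent (msb first: e7 e6..e0) is
 * widened below to the 11-bit register exponent as follows:
 *
 *	1 1111111  ->  111 1111 1111	(infinity / NaN)
 *	1 xxxxxxx  ->  100 0xxx xxxx
 *	0 0000000  ->  000 0000 0000	(zero / denormal)
 *	0 xxxxxxx  ->  011 1xxx xxxx
 */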
632 static inline unsigned long
633 s_mem_to_reg (unsigned long s_mem)
634 {
635 unsigned long frac = (s_mem >> 0) & 0x7fffff;
636 unsigned long sign = (s_mem >> 31) & 0x1;
637 unsigned long exp_msb = (s_mem >> 30) & 0x1;
638 unsigned long exp_low = (s_mem >> 23) & 0x7f;
639 unsigned long exp;
640
641 exp = (exp_msb << 10) | exp_low; /* common case */
642 if (exp_msb) {
643 if (exp_low == 0x7f) {
644 exp = 0x7ff;
645 }
646 } else {
647 if (exp_low == 0x00) {
648 exp = 0x000;
649 } else {
650 exp |= (0x7 << 7);
651 }
652 }
653 return (sign << 63) | (exp << 52) | (frac << 29);
654 }
655
656 /*
657 * Convert an s-floating point value in register format to the
658 * corresponding value in memory format.
659 */
660 static inline unsigned long
661 s_reg_to_mem (unsigned long s_reg)
662 {
663 return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
664 }
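
/* A quick illustration of the two helpers above, kept under #if 0 like the
   other debugging snippets in this file (the function name is made up and
   referenced nowhere): 1.0f is 0x3f800000 in S (memory) format, becomes
   0x3ff0000000000000 in register format, and converting back recovers the
   original bits. */
#if 0
static void s_float_format_example(void)
{
	/* illustrative only */
	unsigned long reg = s_mem_to_reg(0x3f800000UL);		/* 1.0f */

	if (reg != 0x3ff0000000000000UL
	    || s_reg_to_mem(reg) != 0x3f800000UL)
		printk("s-float conversion helpers look broken\n");
}
#endif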
665
666 /*
667 * Handle user-level unaligned fault. Handling user-level unaligned
668 * faults is *extremely* slow and produces nasty messages. A user
669 * program *should* fix unaligned faults ASAP.
670 *
671 * Notice that we have (almost) the regular kernel stack layout here,
672 * so finding the appropriate registers is a little more difficult
673 * than in the kernel case.
674 *
675 * Finally, we handle regular integer load/stores only. In
676 * particular, load-linked/store-conditionally and floating point
677 * load/stores are not supported. The former make no sense with
678 * unaligned faults (they are guaranteed to fail) and I don't think
679 * the latter will occur in any decent program.
680 *
681 * Sigh. We *do* have to handle some FP operations, because GCC will
682 * use them as temporary storage for integer memory-to-memory copies.
683 * However, we need to deal with stt/ldt and sts/lds only.
684 */
685
686 #define OP_INT_MASK ( 1L << 0x28 | 1L << 0x2c /* ldl stl */ \
687 | 1L << 0x29 | 1L << 0x2d /* ldq stq */ \
688 | 1L << 0x0c | 1L << 0x0d /* ldwu stw */ \
689 | 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */
690
691 #define OP_WRITE_MASK ( 1L << 0x26 | 1L << 0x27 /* sts stt */ \
692 | 1L << 0x2c | 1L << 0x2d /* stl stq */ \
693 | 1L << 0x0d | 1L << 0x0e ) /* stw stb */
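
/* Opcodes are only 6 bits wide, so each mask above is simply a 64-bit set
   of opcodes: "(1L << opcode) & OP_INT_MASK" below tests whether the
   trapping opcode is one of the plain integer loads/stores that can be
   patched up through the integer register file. */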
694
695 #define R(x) ((size_t) &((struct pt_regs *)0)->x)
696
697 static int unauser_reg_offsets[32] = {
698 R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
699 /* r9 ... r15 are stored in front of regs. */
700 -56, -48, -40, -32, -24, -16, -8,
701 R(r16), R(r17), R(r18),
702 R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
703 R(r27), R(r28), R(gp),
704 0, 0
705 };
706
707 #undef R
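
/* Used below as reg_addr = (unsigned long *)((char *)regs + offset).  The
   negative entries reach r9-r15, which the entry code pushed immediately
   below the struct pt_regs on the kernel stack (that is the "in front of
   regs" mentioned above). */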
708
709 asmlinkage void
710 do_entUnaUser(void * va, unsigned long opcode,
711 unsigned long reg, struct pt_regs *regs)
712 {
713 static int cnt = 0;
714 static long last_time = 0;
715
716 unsigned long tmp1, tmp2, tmp3, tmp4;
717 unsigned long fake_reg, *reg_addr = &fake_reg;
718 unsigned long uac_bits;
719 long error;
720
721 /* Check the UAC bits to decide what the user wants us to do
722 with the unaligned access. */
723
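/* UAC_NOPRINT, UAC_SIGBUS and UAC_NOFIX come from <asm/sysinfo.h>; they are
   per-thread bits kept in thread.flags above UAC_SHIFT, and user space can
   set them (e.g. via osf_setsysinfo) to silence the message, get a SIGBUS
   instead, or skip the fixup entirely. */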
724 uac_bits = (current->thread.flags >> UAC_SHIFT) & UAC_BITMASK;
725 if (!(uac_bits & UAC_NOPRINT)) {
726 if (cnt >= 5 && jiffies - last_time > 5*HZ) {
727 cnt = 0;
728 }
729 if (++cnt < 5) {
730 printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
731 current->comm, current->pid,
732 regs->pc - 4, va, opcode, reg);
733 }
734 last_time = jiffies;
735 }
736 if (uac_bits & UAC_SIGBUS) {
737 goto give_sigbus;
738 }
739 if (uac_bits & UAC_NOFIX) {
740 /* Not sure why you'd want to use this, but... */
741 return;
742 }
743
744 /* Don't bother reading ds in the access check since we already
745 know that this came from the user. Also rely on the fact that
746 the page at TASK_SIZE is unmapped and so can't be touched anyway. */
747 if (!__access_ok((unsigned long)va, 0, USER_DS))
748 goto give_sigsegv;
749
750 ++unaligned[1].count;
751 unaligned[1].va = (unsigned long)va;
752 unaligned[1].pc = regs->pc - 4;
753
754 if ((1L << opcode) & OP_INT_MASK) {
755 /* it's an integer load/store */
756 if (reg < 30) {
757 reg_addr = (unsigned long *)
758 ((char *)regs + unauser_reg_offsets[reg]);
759 } else if (reg == 30) {
760 /* usp in PAL regs */
761 fake_reg = rdusp();
762 } else {
763 /* zero "register" */
764 fake_reg = 0;
765 }
766 }
767
768 /* We don't want to use the generic get/put unaligned macros as
769 we want to trap exceptions. Only if we actually get an
770 exception will we decide whether we should have caught it. */
771
772 switch (opcode) {
773 case 0x0c: /* ldwu */
774 __asm__ __volatile__(
775 "1: ldq_u %1,0(%3)\n"
776 "2: ldq_u %2,1(%3)\n"
777 " extwl %1,%3,%1\n"
778 " extwh %2,%3,%2\n"
779 "3:\n"
780 ".section __ex_table,\"a\"\n"
781 " .gprel32 1b\n"
782 " lda %1,3b-1b(%0)\n"
783 " .gprel32 2b\n"
784 " lda %2,3b-2b(%0)\n"
785 ".previous"
786 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
787 : "r"(va), "0"(0));
788 if (error)
789 goto give_sigsegv;
790 *reg_addr = tmp1|tmp2;
791 break;
792
793 case 0x22: /* lds */
794 __asm__ __volatile__(
795 "1: ldq_u %1,0(%3)\n"
796 "2: ldq_u %2,3(%3)\n"
797 " extll %1,%3,%1\n"
798 " extlh %2,%3,%2\n"
799 "3:\n"
800 ".section __ex_table,\"a\"\n"
801 " .gprel32 1b\n"
802 " lda %1,3b-1b(%0)\n"
803 " .gprel32 2b\n"
804 " lda %2,3b-2b(%0)\n"
805 ".previous"
806 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
807 : "r"(va), "0"(0));
808 if (error)
809 goto give_sigsegv;
810 alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
811 return;
812
813 case 0x23: /* ldt */
814 __asm__ __volatile__(
815 "1: ldq_u %1,0(%3)\n"
816 "2: ldq_u %2,7(%3)\n"
817 " extql %1,%3,%1\n"
818 " extqh %2,%3,%2\n"
819 "3:\n"
820 ".section __ex_table,\"a\"\n"
821 " .gprel32 1b\n"
822 " lda %1,3b-1b(%0)\n"
823 " .gprel32 2b\n"
824 " lda %2,3b-2b(%0)\n"
825 ".previous"
826 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
827 : "r"(va), "0"(0));
828 if (error)
829 goto give_sigsegv;
830 alpha_write_fp_reg(reg, tmp1|tmp2);
831 return;
832
833 case 0x28: /* ldl */
834 __asm__ __volatile__(
835 "1: ldq_u %1,0(%3)\n"
836 "2: ldq_u %2,3(%3)\n"
837 " extll %1,%3,%1\n"
838 " extlh %2,%3,%2\n"
839 "3:\n"
840 ".section __ex_table,\"a\"\n"
841 " .gprel32 1b\n"
842 " lda %1,3b-1b(%0)\n"
843 " .gprel32 2b\n"
844 " lda %2,3b-2b(%0)\n"
845 ".previous"
846 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
847 : "r"(va), "0"(0));
848 if (error)
849 goto give_sigsegv;
850 *reg_addr = (int)(tmp1|tmp2);
851 break;
852
853 case 0x29: /* ldq */
854 __asm__ __volatile__(
855 "1: ldq_u %1,0(%3)\n"
856 "2: ldq_u %2,7(%3)\n"
857 " extql %1,%3,%1\n"
858 " extqh %2,%3,%2\n"
859 "3:\n"
860 ".section __ex_table,\"a\"\n"
861 " .gprel32 1b\n"
862 " lda %1,3b-1b(%0)\n"
863 " .gprel32 2b\n"
864 " lda %2,3b-2b(%0)\n"
865 ".previous"
866 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
867 : "r"(va), "0"(0));
868 if (error)
869 goto give_sigsegv;
870 *reg_addr = tmp1|tmp2;
871 break;
872
873 /* Note that the store sequences do not indicate that they change
874 memory because it _should_ be affecting nothing in this context.
875 (Otherwise we have other, much larger, problems.) */
876 case 0x0d: /* stw */
877 __asm__ __volatile__(
878 "1: ldq_u %2,1(%5)\n"
879 "2: ldq_u %1,0(%5)\n"
880 " inswh %6,%5,%4\n"
881 " inswl %6,%5,%3\n"
882 " mskwh %2,%5,%2\n"
883 " mskwl %1,%5,%1\n"
884 " or %2,%4,%2\n"
885 " or %1,%3,%1\n"
886 "3: stq_u %2,1(%5)\n"
887 "4: stq_u %1,0(%5)\n"
888 "5:\n"
889 ".section __ex_table,\"a\"\n"
890 " .gprel32 1b\n"
891 " lda %2,5b-1b(%0)\n"
892 " .gprel32 2b\n"
893 " lda %1,5b-2b(%0)\n"
894 " .gprel32 3b\n"
895 " lda $31,5b-3b(%0)\n"
896 " .gprel32 4b\n"
897 " lda $31,5b-4b(%0)\n"
898 ".previous"
899 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
900 "=&r"(tmp3), "=&r"(tmp4)
901 : "r"(va), "r"(*reg_addr), "0"(0));
902 if (error)
903 goto give_sigsegv;
904 return;
905
906 case 0x26: /* sts */
907 fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
908 /* FALLTHRU */
909
910 case 0x2c: /* stl */
911 __asm__ __volatile__(
912 "1: ldq_u %2,3(%5)\n"
913 "2: ldq_u %1,0(%5)\n"
914 " inslh %6,%5,%4\n"
915 " insll %6,%5,%3\n"
916 " msklh %2,%5,%2\n"
917 " mskll %1,%5,%1\n"
918 " or %2,%4,%2\n"
919 " or %1,%3,%1\n"
920 "3: stq_u %2,3(%5)\n"
921 "4: stq_u %1,0(%5)\n"
922 "5:\n"
923 ".section __ex_table,\"a\"\n"
924 " .gprel32 1b\n"
925 " lda %2,5b-1b(%0)\n"
926 " .gprel32 2b\n"
927 " lda %1,5b-2b(%0)\n"
928 " .gprel32 3b\n"
929 " lda $31,5b-3b(%0)\n"
930 " .gprel32 4b\n"
931 " lda $31,5b-4b(%0)\n"
932 ".previous"
933 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
934 "=&r"(tmp3), "=&r"(tmp4)
935 : "r"(va), "r"(*reg_addr), "0"(0));
936 if (error)
937 goto give_sigsegv;
938 return;
939
940 case 0x27: /* stt */
941 fake_reg = alpha_read_fp_reg(reg);
942 /* FALLTHRU */
943
944 case 0x2d: /* stq */
945 __asm__ __volatile__(
946 "1: ldq_u %2,7(%5)\n"
947 "2: ldq_u %1,0(%5)\n"
948 " insqh %6,%5,%4\n"
949 " insql %6,%5,%3\n"
950 " mskqh %2,%5,%2\n"
951 " mskql %1,%5,%1\n"
952 " or %2,%4,%2\n"
953 " or %1,%3,%1\n"
954 "3: stq_u %2,7(%5)\n"
955 "4: stq_u %1,0(%5)\n"
956 "5:\n"
957 ".section __ex_table,\"a\"\n\t"
958 " .gprel32 1b\n"
959 " lda %2,5b-1b(%0)\n"
960 " .gprel32 2b\n"
961 " lda %1,5b-2b(%0)\n"
962 " .gprel32 3b\n"
963 " lda $31,5b-3b(%0)\n"
964 " .gprel32 4b\n"
965 " lda $31,5b-4b(%0)\n"
966 ".previous"
967 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
968 "=&r"(tmp3), "=&r"(tmp4)
969 : "r"(va), "r"(*reg_addr), "0"(0));
970 if (error)
971 goto give_sigsegv;
972 return;
973
974 default:
975 /* What instruction were you trying to use, exactly? */
976 goto give_sigbus;
977 }
978
979 /* Only integer loads should get here; everyone else returns early. */
980 if (reg == 30)
981 wrusp(fake_reg);
982 return;
983
984 give_sigsegv:
985 regs->pc -= 4; /* make pc point to faulting insn */
986 send_sig(SIGSEGV, current, 1);
987 return;
988
989 give_sigbus:
990 regs->pc -= 4;
991 send_sig(SIGBUS, current, 1);
992 return;
993 }
994
995 /*
996 * Unimplemented system calls.
997 */
998 asmlinkage long
999 alpha_ni_syscall(unsigned long a0, unsigned long a1, unsigned long a2,
1000 unsigned long a3, unsigned long a4, unsigned long a5,
1001 struct pt_regs regs)
1002 {
1003 /* We only get here for OSF system calls, minus #112;
1004 the rest go to sys_ni_syscall. */
1005 #if 0
1006 printk("<sc %ld(%lx,%lx,%lx)>", regs.r0, a0, a1, a2);
1007 #endif
1008 return -ENOSYS;
1009 }
1010
1011 void
1012 trap_init(void)
1013 {
1014 /* Tell PAL-code what global pointer we want in the kernel. */
1015 register unsigned long gptr __asm__("$29");
1016 wrkgp(gptr);
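/* PALcode will reload this value into $29 when it dispatches the exception
   entry points registered below, so the handlers start out with a valid
   kernel gp even when the trap comes from user mode. */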
1017
1018 wrent(entArith, 1);
1019 wrent(entMM, 2);
1020 wrent(entIF, 3);
1021 wrent(entUna, 4);
1022 wrent(entSys, 5);
1023 wrent(entDbg, 6);
1024
1025 /* Hack for Multia (UDB) and JENSEN: some of their SRMs have
1026 * a bug in the handling of the opDEC fault. Fix it up if so.
1027 */
1028 if (implver() == IMPLVER_EV4) {
1029 opDEC_check();
1030 }
1031 }
1032