/* $Id: traps.c,v 1.1.1.1.2.5 2003/10/23 22:08:56 yoshii Exp $
 *
 *  linux/arch/sh/traps.c
 *
 *  SuperH version: Copyright (C) 1999 Niibe Yutaka
 *                  Copyright (C) 2000 Philipp Rumpf
 *                  Copyright (C) 2000 David Howells
 *                  Copyright (C) 2002 Paul Mundt
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/processor.h>

#ifdef CONFIG_SH_KGDB
#include <asm/kgdb.h>
#define CHK_REMOTE_DEBUG(regs) \
{ \
        if ((kgdb_debug_hook != (kgdb_debug_hook_t *) NULL) && (!user_mode(regs))) \
        { \
                (*kgdb_debug_hook)(regs); \
        } \
}
#else
#define CHK_REMOTE_DEBUG(regs)
#endif

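/*
 * DO_ERROR() expands to a trap handler: the low-level entry code leaves the
 * exception error code in R2_BANK, so each handler reads it back with
 * "stc r2_bank".  If the offending opcode is really a DSP instruction, the
 * handler enables SR.DSP and restarts it instead of signalling the task.
 */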
#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(unsigned long r4, unsigned long r5, \
                          unsigned long r6, unsigned long r7, \
                          struct pt_regs regs) \
{ \
        unsigned long error_code; \
 \
        /* Check if it's a DSP instruction */ \
        if (is_dsp_inst(&regs)) { \
                /* Enable DSP mode, and restart instruction. */ \
                regs.sr |= SR_DSP; \
                return; \
        } \
 \
        asm volatile("stc r2_bank, %0": "=r" (error_code)); \
        sti(); \
        tsk->thread.error_code = error_code; \
        tsk->thread.trap_no = trapnr; \
        CHK_REMOTE_DEBUG(&regs); \
        force_sig(signr, tsk); \
        die_if_no_fixup(str, &regs, error_code); \
}

/*
 * These constants are for searching for possible module text
 * segments.  VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
 * a guess of how much space is likely to be vmalloced.
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define MODULE_RANGE (8*1024*1024)

spinlock_t die_lock;

void die(const char * str, struct pt_regs * regs, long err)
{
        console_verbose();
        spin_lock_irq(&die_lock);
        printk("%s: %04lx\n", str, err & 0xffff);
        CHK_REMOTE_DEBUG(regs);
        show_regs(regs);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}

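/* Only the first few unaligned userspace accesses are reported, to keep the
   fixup messages from flooding the log. */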
static int handle_unaligned_notify_count = 10;

/*
 * try and fix up kernelspace address errors
 * - userspace errors just cause EFAULT to be returned, resulting in SEGV
 * - kernel/userspace interfaces cause a jump to an appropriate handler
 * - other kernel errors are bad
 * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
 */
static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
        if (!user_mode(regs))
        {
                unsigned long fixup;
                fixup = search_exception_table(regs->pc);
                if (fixup) {
                        regs->pc = fixup;
                        return 0;
                }
                die(str, regs, err);
        }
        return -EFAULT;
}

/*
 * handle an instruction that does an unaligned memory access by emulating the
 * desired behaviour
 * - note that PC _may not_ point to the faulting instruction
 *   (if that instruction is in a branch delay slot)
 * - return 0 if emulation okay, -EFAULT on existential error
 */
static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
{
        int ret, index, count;
        unsigned long *rm, *rn;
        unsigned char *src, *dst;

        index = (instruction>>8)&15;    /* 0x0F00 */
        rn = &regs->regs[index];

        index = (instruction>>4)&15;    /* 0x00F0 */
        rm = &regs->regs[index];

        count = 1<<(instruction&3);

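        /*
         * The emulated loads below first clear the whole destination
         * register; for 16-bit loads the upper bytes are then set to 0xff
         * when bit 15 of the fetched value is set, reproducing the sign
         * extension that mov.w performs in hardware.
         */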
        ret = -EFAULT;
        switch (instruction>>12) {
        case 0: /* mov.[bwl] to/from memory via r0+rn */
                if (instruction & 8) {
                        /* from memory */
                        src = (unsigned char*) *rm;
                        src += regs->regs[0];
                        dst = (unsigned char*) rn;
                        *(unsigned long*)dst = 0;

#ifdef __LITTLE_ENDIAN__
                        if (copy_from_user(dst, src, count))
                                goto fetch_fault;

                        if ((count == 2) && dst[1] & 0x80) {
                                dst[2] = 0xff;
                                dst[3] = 0xff;
                        }
#else
                        dst += 4-count;

                        if (__copy_user(dst, src, count))
                                goto fetch_fault;

                        if ((count == 2) && dst[2] & 0x80) {
                                dst[0] = 0xff;
                                dst[1] = 0xff;
                        }
#endif
                } else {
                        /* to memory */
                        src = (unsigned char*) rm;
#if !defined(__LITTLE_ENDIAN__)
                        src += 4-count;
#endif
                        dst = (unsigned char*) *rn;
                        dst += regs->regs[0];

                        if (copy_to_user(dst, src, count))
                                goto fetch_fault;
                }
                ret = 0;
                break;

        case 1: /* mov.l Rm,@(disp,Rn) */
                src = (unsigned char*) rm;
                dst = (unsigned char*) *rn;
                dst += (instruction&0x000F)<<2;

                if (copy_to_user(dst,src,4))
                        goto fetch_fault;
                ret = 0;
                break;

        case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
                if (instruction & 4)
                        *rn -= count;
                src = (unsigned char*) rm;
                dst = (unsigned char*) *rn;
#if !defined(__LITTLE_ENDIAN__)
                src += 4-count;
#endif
                if (copy_to_user(dst, src, count))
                        goto fetch_fault;
                ret = 0;
                break;

        case 5: /* mov.l @(disp,Rm),Rn */
                src = (unsigned char*) *rm;
                src += (instruction&0x000F)<<2;
                dst = (unsigned char*) rn;
                *(unsigned long*)dst = 0;

                if (copy_from_user(dst,src,4))
                        goto fetch_fault;
                ret = 0;
                break;

        case 6: /* mov.[bwl] from memory, possibly with post-increment */
                src = (unsigned char*) *rm;
                if (instruction & 4)
                        *rm += count;
                dst = (unsigned char*) rn;
                *(unsigned long*)dst = 0;

#ifdef __LITTLE_ENDIAN__
                if (copy_from_user(dst, src, count))
                        goto fetch_fault;

                if ((count == 2) && dst[1] & 0x80) {
                        dst[2] = 0xff;
                        dst[3] = 0xff;
                }
#else
                dst += 4-count;

                if (copy_from_user(dst, src, count))
                        goto fetch_fault;

                if ((count == 2) && dst[2] & 0x80) {
                        dst[0] = 0xff;
                        dst[1] = 0xff;
                }
#endif
                ret = 0;
                break;

        case 8:
                switch ((instruction&0xFF00)>>8) {
                case 0x81: /* mov.w R0,@(disp,Rn) */
                        src = (unsigned char*) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
                        src += 2;
#endif
                        dst = (unsigned char*) *rm; /* called Rn in the spec */
                        dst += (instruction&0x000F)<<1;

                        if (copy_to_user(dst, src, 2))
                                goto fetch_fault;
                        ret = 0;
                        break;

                case 0x85: /* mov.w @(disp,Rm),R0 */
                        src = (unsigned char*) *rm;
                        src += (instruction&0x000F)<<1;
                        dst = (unsigned char*) &regs->regs[0];
                        *(unsigned long*)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
                        dst += 2;
#endif

                        if (copy_from_user(dst, src, 2))
                                goto fetch_fault;

#ifdef __LITTLE_ENDIAN__
                        if (dst[1] & 0x80) {
                                dst[2] = 0xff;
                                dst[3] = 0xff;
                        }
#else
                        if (dst[2] & 0x80) {
                                dst[0] = 0xff;
                                dst[1] = 0xff;
                        }
#endif
                        ret = 0;
                        break;
                }
                break;
        }
        return ret;

 fetch_fault:
        /* Argh. Address not only misaligned but also non-existent.
         * Raise an EFAULT and see if it's trapped
         */
        return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
}

/*
 * emulate the instruction in the delay slot
 * - fetches the instruction from PC+2
 */
static inline int handle_unaligned_delayslot(struct pt_regs *regs)
{
        u16 instruction;

        if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
                /* the instruction-fetch faulted */
                if (user_mode(regs))
                        return -EFAULT;

                /* kernel */
                die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
        }

        return handle_unaligned_ins(instruction,regs);
}

/*
 * handle an instruction that does an unaligned memory access
 * - have to be careful of branch delay-slot instructions that fault
 *  SH3:
 *   - if the branch would be taken PC points to the branch
 *   - if the branch would not be taken, PC points to delay-slot
 *  SH4:
 *   - PC always points to delayed branch
 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 */

/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
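/*
 * The 12-bit variant relies on a shift trick: "instr<<4" moves the sign bit
 * of the 12-bit displacement into bit 15 of the signed short, and the
 * arithmetic ">>3" then sign-extends while multiplying by 2 (instructions
 * are two bytes), i.e. sign_extend(instr & 0xFFF) * 2 + 4.
 */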

static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
{
        u_int rm;
        int ret, index;

        index = (instruction>>8)&15;    /* 0x0F00 */
        rm = regs->regs[index];

        /* shout about the first ten userspace fixups */
        if (user_mode(regs) && handle_unaligned_notify_count>0) {
                handle_unaligned_notify_count--;

                printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
                       current->comm,current->pid,(u16*)regs->pc,instruction);
        }

        ret = -EFAULT;
        switch (instruction&0xF000) {
        case 0x0000:
                if (instruction==0x000B) {
                        /* rts */
                        ret = handle_unaligned_delayslot(regs);
                        if (ret==0)
                                regs->pc = regs->pr;
                }
                else if ((instruction&0x00FF)==0x0023) {
                        /* braf @Rm */
                        ret = handle_unaligned_delayslot(regs);
                        if (ret==0)
                                regs->pc += rm + 4;
                }
                else if ((instruction&0x00FF)==0x0003) {
                        /* bsrf @Rm */
                        ret = handle_unaligned_delayslot(regs);
                        if (ret==0) {
                                regs->pr = regs->pc + 4;
                                regs->pc += rm + 4;
                        }
                }
                else {
                        /* mov.[bwl] to/from memory via r0+rn */
                        goto simple;
                }
                break;

        case 0x1000: /* mov.l Rm,@(disp,Rn) */
                goto simple;

        case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
                goto simple;

        case 0x4000:
                if ((instruction&0x00FF)==0x002B) {
                        /* jmp @Rm */
                        ret = handle_unaligned_delayslot(regs);
                        if (ret==0)
                                regs->pc = rm;
                }
                else if ((instruction&0x00FF)==0x000B) {
                        /* jsr @Rm */
                        ret = handle_unaligned_delayslot(regs);
                        if (ret==0) {
                                regs->pr = regs->pc + 4;
                                regs->pc = rm;
                        }
                }
                else {
                        /* mov.[bwl] to/from memory via r0+rn */
                        goto simple;
                }
                break;

        case 0x5000: /* mov.l @(disp,Rm),Rn */
                goto simple;

        case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
                goto simple;

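        /*
         * For the conditional delayed branches below, bit 0 of SR is the T
         * (true) flag.  On SH-4 the saved PC points at the branch itself, so
         * a not-taken bf/s or bt/s simply steps over the branch plus its
         * delay slot (4 bytes); on SH-3 the saved PC already reflects the
         * direction taken (see the comment above this function).
         */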
        case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
                switch (instruction&0x0F00) {
                case 0x0100: /* mov.w R0,@(disp,Rm) */
                        goto simple;
                case 0x0500: /* mov.w @(disp,Rm),R0 */
                        goto simple;
                case 0x0B00: /* bf   lab - no delayslot */
                        break;
                case 0x0F00: /* bf/s lab */
                        ret = handle_unaligned_delayslot(regs);
                        if (ret==0) {
#if defined(__SH4__)
                                if ((regs->sr & 0x00000001) != 0)
                                        regs->pc += 4; /* next after slot */
                                else
#endif
                                        regs->pc += SH_PC_8BIT_OFFSET(instruction);
                        }
                        break;
                case 0x0900: /* bt   lab - no delayslot */
                        break;
                case 0x0D00: /* bt/s lab */
                        ret = handle_unaligned_delayslot(regs);
                        if (ret==0) {
#if defined(__SH4__)
                                if ((regs->sr & 0x00000001) == 0)
                                        regs->pc += 4; /* next after slot */
                                else
#endif
                                        regs->pc += SH_PC_8BIT_OFFSET(instruction);
                        }
                        break;
                }
                break;

        case 0xA000: /* bra label */
                ret = handle_unaligned_delayslot(regs);
                if (ret==0)
                        regs->pc += SH_PC_12BIT_OFFSET(instruction);
                break;

        case 0xB000: /* bsr label */
                ret = handle_unaligned_delayslot(regs);
                if (ret==0) {
                        regs->pr = regs->pc + 4;
                        regs->pc += SH_PC_12BIT_OFFSET(instruction);
                }
                break;
        }
        return ret;

        /* handle non-delay-slot instruction */
 simple:
        ret = handle_unaligned_ins(instruction,regs);
        if (ret==0)
                regs->pc += 2;
        return ret;
}

/*
 * Handle various address error exceptions
 */
asmlinkage void do_address_error(struct pt_regs *regs,
                                 unsigned long writeaccess,
                                 unsigned long address)
{
        unsigned long error_code;
        mm_segment_t oldfs;
        u16 instruction;
        int tmp;

        asm volatile("stc r2_bank,%0": "=r" (error_code));

        oldfs = get_fs();

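        /*
         * The faulting opcode is fetched with copy_from_user(), so the
         * address-space limit is temporarily switched to match where the PC
         * lives (USER_DS or KERNEL_DS) and restored once the fixup is done.
         */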
        if (user_mode(regs)) {
                sti();
                current->thread.error_code = error_code;
                current->thread.trap_no = (writeaccess) ? 8 : 7;

                /* bad PC is not something we can fix */
                if (regs->pc & 1)
                        goto uspace_segv;

                set_fs(USER_DS);
                if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
                        /* Argh. Fault on the instruction itself.
                           This should never happen on non-SMP. */
                        set_fs(oldfs);
                        goto uspace_segv;
                }

                tmp = handle_unaligned_access(instruction, regs);
                set_fs(oldfs);

                if (tmp==0)
                        return; /* sorted */

        uspace_segv:
                printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
                force_sig(SIGSEGV, current);
        } else {
                if (regs->pc & 1)
                        die("unaligned program counter", regs, error_code);

                set_fs(KERNEL_DS);
                if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
                        /* Argh. Fault on the instruction itself.
                           This should never happen on non-SMP. */
                        set_fs(oldfs);
                        die("insn faulting in do_address_error", regs, 0);
                }

                handle_unaligned_access(instruction, regs);
                set_fs(oldfs);
        }
}


#ifdef CONFIG_SH_DSP
/*
 *	SH-DSP support gerg@snapgear.com.
 */
int is_dsp_inst(struct pt_regs *regs)
{
        unsigned short inst;

        get_user(inst, ((unsigned short *) regs->pc));

        inst &= 0xf000;

        /* Check for any type of DSP or support instruction */
        if ((inst == 0xf000) || (inst == 0x4000))
                return 1;

        return 0;
}
#else
#define is_dsp_inst(regs)	(0)
#endif /* CONFIG_SH_DSP */

DO_ERROR(12, SIGILL,  "reserved instruction", reserved_inst, current)
DO_ERROR(13, SIGILL,  "illegal slot instruction", illegal_slot_inst, current)

asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
                                   unsigned long r6, unsigned long r7,
                                   struct pt_regs regs)
{
        long ex;
        asm volatile("stc r2_bank, %0" : "=r" (ex));
        die_if_kernel("exception", &regs, ex);
}

#if defined(CONFIG_SH_STANDARD_BIOS)
void *gdb_vbr_vector;
#endif

void __init trap_init(void)
{
        extern void *vbr_base;
        extern void *exception_handling_table[14];

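        /* Hook the handlers generated by DO_ERROR() above into the low-level
           exception dispatch table. */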
        exception_handling_table[12] = (void *)do_reserved_inst;
        exception_handling_table[13] = (void *)do_illegal_slot_inst;

#if defined(CONFIG_SH_STANDARD_BIOS)
        /*
         * Read the old value of the VBR register to initialise
         * the vector through which debug and BIOS traps are
         * delegated by the Linux trap handler.
         */
        {
                register unsigned long vbr;
                asm volatile("stc vbr, %0" : "=r" (vbr));
                gdb_vbr_vector = (void *)(vbr + 0x100);
                printk("Setting GDB trap vector to 0x%08lx\n",
                       (unsigned long)gdb_vbr_vector);
        }
#endif

        /* NOTE: The VBR value should be at P1
           (or P2, the virtual "fixed" address space).
           It definitely should not be a physical address. */

        asm volatile("ldc %0, vbr"
                     : /* no output */
                     : "r" (&vbr_base)
                     : "memory");
}

void show_task(unsigned long *sp)
{
        unsigned long *stack, addr;
        unsigned long module_start = VMALLOC_START;
        unsigned long module_end = VMALLOC_END;
        extern long _text, _etext;
        int i = 1;

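        /*
         * If no stack pointer is supplied, start from the current r15.  Note
         * that the inline asm reuses module_start/module_end as its outputs,
         * so the module range check further down then works with those
         * register values rather than VMALLOC_START/VMALLOC_END.
         */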
        if (!sp) {
                __asm__ __volatile__ (
                        "mov r15, %0\n\t"
                        "stc r7_bank, %1\n\t"
                        : "=r" (module_start),
                          "=r" (module_end)
                );

                sp = (unsigned long *)module_start;
        }

        stack = sp;

        printk("\nCall trace: ");

        while (((long)stack & (THREAD_SIZE - 1))) {
                if (__get_user(addr, stack)) {
                        printk("Failing address 0x%lx\n", *stack);
                        break;
                }
                stack++;

                if (((addr >= (unsigned long)&_text) &&
                     (addr <= (unsigned long)&_etext)) ||
                    ((addr >= module_start) && (addr <= module_end))) {
                        if (i && ((i % 8) == 0))
                                printk("\n       ");

                        printk("[<%08lx>] ", addr);
                        i++;
                }
        }

        printk("\n");
}

void show_trace_task(struct task_struct *tsk)
{
        show_task((unsigned long *)tsk->thread.sp);
}