/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:  added writeback completion after return from sighandler
 *               for 68040
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl __sys_fork, __sys_clone, __sys_vfork
.globl bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text

/*
 * fork/clone/vfork syscall stubs.
 *
 * Each one pushes the callee-saved registers with SAVE_SWITCH_STACK so the
 * C implementation can snapshot them for the child, calls the C handler,
 * then simply discards the saved registers with "lea" (24 bytes for the
 * six registers saved by SAVE_SWITCH_STACK; 28 when an extra argument was
 * pushed with "pea") instead of restoring them -- the caller's registers
 * are still live, only the copies on the stack are dropped.
 */
ENTRY(__sys_fork)
	SAVE_SWITCH_STACK
	jbsr	sys_fork
	lea	%sp@(24),%sp		| drop the saved switch_stack registers
	rts

ENTRY(__sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| argument: pointer to the pt_regs above
	jbsr	m68k_clone
	lea	%sp@(28),%sp		| drop argument + saved registers
	rts

ENTRY(__sys_vfork)
	SAVE_SWITCH_STACK
	jbsr	sys_vfork
	lea	%sp@(24),%sp		| drop the saved switch_stack registers
	rts

ENTRY(__sys_clone3)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| argument: pointer to the pt_regs above
	jbsr	m68k_clone3
	lea	%sp@(28),%sp		| drop argument + saved registers
	rts

/*
 * sigreturn/rt_sigreturn syscall entries.
 *
 * A gap is opened below the saved registers and both the pt_regs and
 * switch_stack addresses are passed to the C handler, which rebuilds the
 * register frames lower on the stack (the gap leaves room in case the
 * replacement exception frame is larger than the original -- presumably
 * sized for the worst-case frame; confirm against do_sigreturn()).
 * The C handler returns the address of the rebuilt switch_stack, which
 * becomes the new %sp.
 */
ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%a1			| switch_stack pointer
	lea	%sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
	lea	%sp@(-84),%sp		| leave a gap
	movel	%a1,%sp@-
	movel	%a0,%sp@-
	jbsr	do_sigreturn
	jra	1f			| shared with rt_sigreturn()

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%a1			| switch_stack pointer
	lea	%sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
	lea	%sp@(-84),%sp		| leave a gap
	movel	%a1,%sp@-
	movel	%a0,%sp@-
	| stack contents:
	|   [original pt_regs address] [original switch_stack address]
	|   [gap] [switch_stack] [pt_regs] [exception frame]
	jbsr	do_rt_sigreturn

1:
	| stack contents now:
	|   [original pt_regs address] [original switch_stack address]
	|   [unused part of the gap] [moved switch_stack] [moved pt_regs]
	|   [replacement exception frame]
	| return value of do_{rt_,}sigreturn() points to moved switch_stack.

	movel	%d0,%sp			| discard the leftover junk
	RESTORE_SWITCH_STACK
	| stack contents now is just [syscall return address] [pt_regs] [frame]
	| return pt_regs.d0
	movel	%sp@(PT_OFF_D0+4),%d0	| +4 skips the syscall return address
	rts

/*
 * Bus-error exception entry: save full state and hand the exception
 * frame pointer to the C handler buserr_c().
 */
ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

/*
 * Generic trap/exception entry: save full state and hand the exception
 * frame pointer to the C handler trap_c().
 */
ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception

	| After a fork we jump here directly from resume,
	| so that %d1 contains the previous task
	| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-		| schedule_tail(prev)
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-		| schedule_tail(prev), as above
	jsr	schedule_tail
	movel	%d7,(%sp)		| reuse the slot as the payload argument
	jsr	%a3@			| call the thread function
	addql	#4,%sp
	jra	ret_from_exception

#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

/*
 * Debug interrupt entry (ColdFire/no-MMU builds only): pass the
 * exception frame pointer to dbginterrupt_c().
 */
.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif

/*
 * Tail-call into schedule() with ret_from_exception as the return
 * address, after recording the current frame top via set_esp0().
 */
ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
	pea	ret_from_exception	| schedule() will "return" there
	jmp	schedule

/*
 * Userspace signal-return trampolines: re-enter the kernel via the
 * sigreturn/rt_sigreturn syscalls.  moveq suffices for __NR_sigreturn
 * (fits in 8 bits signed); rt_sigreturn needs the full movel.
 */
ENTRY(ret_from_user_signal)
	moveq	#__NR_sigreturn,%d0
	trap	#0

ENTRY(ret_from_user_rt_signal)
	movel	#__NR_rt_sigreturn,%d0
	trap	#0

#else

/*
 * Syscall tracing entry hook: pre-set -ENOSYS as the return value (so
 * strace sees it if the call is denied), let the tracer run, then
 * re-validate the (possibly tracer-modified) syscall number.
 */
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp			| dummy return address slot
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_enter
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%sp@(PT_OFF_ORIG_D0),%d0
	cmpl	#NR_syscalls,%d0
	jcs	syscall			| unsigned compare: valid number
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

/* Syscall tracing exit hook, symmetric to do_trace_entry. */
do_trace_exit:
	subql	#4,%sp			| dummy return address slot
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_leave
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

/*
 * Main system-call entry (trap #0).  %d0 holds the syscall number.
 * Checks the thread-info flag byte for tracing work, range-checks the
 * number, then dispatches through sys_call_table.
 */
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)	| tstb sets N from bit 7 of this byte
	jmi	do_trace_entry		| branch if that trace flag is set
	cmpl	#NR_syscalls,%d0
	jcc	badsys			| unsigned compare: number out of range
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)	| indirect call via table entry
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0	| any exit-work flags pending?
	jne	syscall_exit_work
1:	RESTORE_ALL

/*
 * Slow path out of a syscall: decode the flag word in %d0 by shifting
 * flags into the condition codes (carry = old bit 15, N = old bit 14,
 * and so on), dispatching to the matching work handler.  Which flag
 * sits at which bit is fixed by the thread_info layout -- confirm
 * against the TINFO_FLAGS definitions.
 */
syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0			| bit 15 -> C, bit 14 -> N
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0			| expose the low flag byte
	jne	do_signal_return
	pea	resume_userspace	| schedule() returns to resume_userspace
	jra	schedule


/*
 * Common exception return path.  Interrupts are only re-enabled when
 * returning to user mode (i.e. when this is the last frame on the
 * kernel stack), to bound kernel stack usage under interrupt load.
 */
ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0	| low flag byte: pending exit work?
	jne	exit_work
1:	RESTORE_ALL

exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0			| top bit of the byte -> N/Z
	jne	do_signal_return
	pea	resume_userspace	| schedule() returns to resume_userspace
	jra	schedule


/*
 * Pending signal (or notify-resume) work: build a switch_stack below
 * pt_regs and call do_notify_resume(pt_regs) with a dummy return
 * address slot so the frame layout matches a real syscall entry.
 */
do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrl	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jbra	resume_userspace

/*
 * Single-step trace pending from a syscall: clear the trace bit in the
 * saved SR and deliver SIGTRAP to the current task via send_sig().
 */
do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp
	addql	#4,%sp
	jbra	resume_userspace


/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0 | vector field of the frame
	subw	#VEC_SPUR,%d0		| rebase to IRQ number

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
auto_irqhandler_fixup = . + 2		| address of the jsr operand, so the
					| target can be patched -- see users
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0 | vector field of the frame
user_irqvec_fixup = . + 2		| address of the subw immediate, so the
					| user-vector base can be patched
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)

	movel	%sp,%sp@-		| frame pointer argument
	jsr	handle_badint
	addql	#4,%sp
	jra	ret_from_exception

/*
 * Context switch: save the outgoing task's state into its thread
 * struct, switch %curptr, and restore the incoming task's state.
 * Returns the previous task in %d1 (used by ret_from_fork).
 */
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FC)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| emulated FPU: nothing to fsave
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| null frame: no FP regs to save
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f			| null frame: no FP regs to save
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| emulated FPU: nothing to frestore
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| null frame: skip FP register restore
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f			| null frame: skip FP register restore
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FC),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */