/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include "entry-header.S"

/*
 * We rely on the fact that R0 is at the bottom of the stack (due to
 * slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

/*
 * Our do_softirq out of line code.  See include/asm-arm/softirq.h for
 * the calling assembly.
 */
ENTRY(__do_softirq)
	stmfd	sp!, {r0 - r3, ip, lr}
	bl	do_softirq
	ldmfd	sp!, {r0 - r3, ip, pc}

	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
	disable_irq r1				@ ensure IRQs are disabled
	ldr	r1, [tsk, #TSK_NEED_RESCHED]
	ldr	r2, [tsk, #TSK_SIGPENDING]
	teq	r1, #0				@ need_resched || sigpending
	teqeq	r2, #0
	bne	slow
	fast_restore_user_regs

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
slow:	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	teq	r1, #0				@ need_resched?
	beq	1f				@ no: go check sigpending

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
reschedule:
	bl	SYMBOL_NAME(schedule)
ret_disable_irq:
	disable_irq r1				@ ensure IRQs are disabled
ENTRY(ret_to_user)
ret_slow_syscall:
	ldr	r1, [tsk, #TSK_NEED_RESCHED]
	ldr	r2, [tsk, #TSK_SIGPENDING]
	teq	r1, #0				@ need_resched => schedule()
	bne	reschedule
1:	teq	r2, #0				@ sigpending => do_signal()
	bne	__do_signal
restore:
	restore_user_regs

__do_signal:
	enable_irq r1
	mov	r0, #0				@ NULL 'oldset'
	mov	r1, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	SYMBOL_NAME(do_signal)		@ note the bl above sets lr
	disable_irq r1				@ ensure IRQs are disabled
	b	restore

/*
 * This is how we return from a fork.  __switch_to will be calling us
 * with r0 pointing at the previous task that was running (ready for
 * calling schedule_tail).
 */
ENTRY(ret_from_fork)
	bl	SYMBOL_NAME(schedule_tail)
	get_current_task tsk
	ldr	ip, [tsk, #TSK_PTRACE]		@ check for syscall tracing
	mov	why, #1
	tst	ip, #PT_TRACESYS		@ are we tracing syscalls?
	beq	ret_disable_irq
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	SYMBOL_NAME(syscall_trace)
	b	ret_disable_irq


#include "calls.S"

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */
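/*
 * For orientation, a sketch of the user-side convention this handler
 * decodes: the SWI comment field carries OS_NUMBER << 20 (0x900000)
 * plus the syscall number.  A minimal, hypothetical getpid call,
 * assuming the old-ABI numbering where __NR_getpid is base + 20:
 *
 *	swi	0x900014		@ 0x900000 + 20 = sys_getpid
 *	@ on return, r0 holds the result or a negative errno
 */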
	/* If we're optimising for StrongARM the resulting code won't
	   run on an ARM7 and we can save a couple of instructions.
								--pb */
#ifdef CONFIG_CPU_ARM710
	.macro	arm710_bug_check, instr, temp
	and	\temp, \instr, #0x0f000000	@ check for SWI
	teq	\temp, #0x0f000000
	bne	.Larm700bug
	.endm

.Larm700bug:
	ldr	r0, [sp, #S_PSR]		@ Get calling cpsr
	sub	lr, lr, #4
	str	lr, [r8]
	msr	spsr, r0
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0				@ required NOP: no banked register
						@ access directly after ldm {..}^
	ldr	lr, [sp, #S_PC]			@ Get PC
	add	sp, sp, #S_FRAME_SIZE
	movs	pc, lr
#else
	.macro	arm710_bug_check, instr, temp
	.endm
#endif

	.align	5
ENTRY(vector_swi)
	save_user_regs
	zero_fp
	get_scno
	arm710_bug_check scno, ip

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq ip

	str	r4, [sp, #-S_OFF]!		@ push fifth arg

	get_current_task tsk
	ldr	ip, [tsk, #TSK_PTRACE]		@ check for syscall tracing
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #OS_NUMBER << 20	@ check OS number
	adr	tbl, sys_call_table		@ load syscall table pointer
	tst	ip, #PT_TRACESYS		@ are we tracing syscalls?
	bne	__sys_trace

	adrsvc	al, lr, ret_fast_syscall	@ return address
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #ARMSWI_OFFSET
	eor	r0, scno, #OS_NUMBER << 20	@ put OS number back
	bcs	SYMBOL_NAME(arm_syscall)
	b	SYMBOL_NAME(sys_ni_syscall)	@ not private func
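/*
 * For orientation: once the OS number is stripped, anything at or
 * above ARMSWI_OFFSET falls through to arm_syscall() above instead of
 * sys_call_table.  A hypothetical user-side cache flush, assuming the
 * old-ABI base 0x9f0000 for the private range:
 *
 *	@ r0 = start, r1 = end, r2 = flags
 *	swi	0x9f0002		@ ARM-private call #2 (cacheflush)
 */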
	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	SYMBOL_NAME(syscall_trace)

	adrsvc	al, lr, __sys_trace_return	@ return address
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	SYMBOL_NAME(syscall_trace)
	b	ret_disable_irq

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	SYMBOL_NAME(cr_alignment)
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r5 = syscall table
	.type	sys_syscall, #function
SYMBOL_NAME(sys_syscall):
	eor	scno, r0, #OS_NUMBER << 20
	cmp	scno, #NR_syscalls		@ check range
	stmleia	sp, {r5, r6}			@ shuffle args
	movle	r0, r1
	movle	r1, r2
	movle	r2, r3
	movle	r3, r4
	ldrle	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall

sys_fork_wrapper:
	add	r0, sp, #S_OFF
	b	SYMBOL_NAME(sys_fork)

sys_vfork_wrapper:
	add	r0, sp, #S_OFF
	b	SYMBOL_NAME(sys_vfork)

sys_execve_wrapper:
	add	r3, sp, #S_OFF
	b	SYMBOL_NAME(sys_execve)

sys_clone_wrapper:
	add	r2, sp, #S_OFF
	b	SYMBOL_NAME(sys_clone)

sys_sigsuspend_wrapper:
	add	r3, sp, #S_OFF
	b	SYMBOL_NAME(sys_sigsuspend)

sys_rt_sigsuspend_wrapper:
	add	r2, sp, #S_OFF
	b	SYMBOL_NAME(sys_rt_sigsuspend)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	b	SYMBOL_NAME(sys_sigreturn)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	b	SYMBOL_NAME(sys_rt_sigreturn)

sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]
	b	do_sigaltstack

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't represent
 * the requested offset in whole pages, we return -EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
	tst	r5, #PGOFF_MASK
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	do_mmap2
	mov	r0, #-EINVAL
	RETINSTR(mov, pc, lr)
#else
	str	r5, [sp, #4]
	b	do_mmap2
#endif
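/*
 * Worked example (illustrative, assuming 16K pages, i.e. PAGE_SHIFT
 * == 14, with PGOFF_MASK covering the low PAGE_SHIFT - 12 bits): a
 * file offset of 8MB arrives as off_4k = 0x800; it passes the
 * alignment test, is shifted right by 2 to 0x200 16K-pages, and is
 * stored at [sp, #4] as do_mmap2's sixth argument.  An unaligned
 * off_4k such as 0x801 would take the -EINVAL path instead.
 */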