/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S  contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */
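
/*
 * Conventions used throughout this file: SAVE_ALL_INT/SAVE_ALL_SYS (from
 * <asm/entry.h>) build the struct pt_regs frame on the kernel stack,
 * GET_CURRENT() fetches the current task, and %curptr is the register
 * alias that holds it.  The PT_OFF_*, TASK_* and TINFO_* offsets used
 * below come from <asm/asm-offsets.h>.
 */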

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:	 added writeback completion after return from sighandler
 *		 for 68040
 */

#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>

#include <asm/asm-offsets.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl sys_fork, sys_clone, sys_vfork
.globl ret_from_interrupt, bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text
ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	bsrl	buserr_c
	addql	#4,%sp
	jra	.Lret_from_exception

ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	bsrl	trap_c
	addql	#4,%sp
	jra	.Lret_from_exception

	| After a fork we jump here directly from resume,
	| so that %d1 contains the previous task
	| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-
	jsr	schedule_tail
	addql	#4,%sp
	jra	.Lret_from_exception

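| System calls with tracing enabled enter here instead of being dispatched
| directly: -ENOSYS is stored as the provisional return value (needed for
| strace), the tracer is notified via syscall_trace, and the (possibly
| modified) syscall number is then revalidated before dispatch.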
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%sp@(PT_OFF_ORIG_D0),%d0
	cmpl	#NR_syscalls,%d0
	jcs	syscall
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

do_trace_exit:
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

ENTRY(ret_from_signal)
	movel	%curptr@(TASK_STACK),%a1
	tstb	%a1@(TINFO_FLAGS+2)
	jge	1f
	jbsr	syscall_trace
1:	RESTORE_SWITCH_STACK
	addql	#4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0
	subql	#7,%d0				| bus error frame ?
	jbne	1f
	movel	%sp,%sp@-
	jbsr	berr_040cleanup
	addql	#4,%sp
1:
#endif
	jra	.Lret_from_exception

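| System call entry: the syscall number arrives in %d0.  SAVE_ALL_SYS saves
| the full register frame; since the m68k C ABI takes arguments from the
| stack, the saved frame doubles as the argument list seen by the sys_*
| handlers called through sys_call_table below.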
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)
	jmi	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0
	jne	syscall_exit_work
1:	RESTORE_ALL

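| Slow path out of a system call: %d0 holds the low word of the thread
| flags.  The interesting flags are tested by shifting them into the carry
| and sign bits instead of using separate btst instructions: syscall
| tracing first, then delayed trace, then pending signals, with the
| remaining case (reschedule) falling through to schedule.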
syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0
	jmi	do_signal_return
	pea	resume_userspace
	jra	schedule


ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0
	jne	exit_work
1:	RESTORE_ALL

exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0
	jmi	do_signal_return
	pea	resume_userspace
	jra	schedule

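| Deliver pending signals: build a switch_stack below the pt_regs frame
| (the dummy long word stands in for its return-pc slot) so that do_signal
| gets a pointer to the complete user register state, just as on the
| system call paths.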
do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrl	do_signal
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jbra	resume_userspace

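| A trace exception whose delivery was deferred: clear the trace bit in
| the saved SR and send SIGTRAP to the current task before heading back
| out through resume_userspace.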
do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp
	addql	#4,%sp
	jbra	resume_userspace


/* This is the main interrupt handler for autovector interrupts */

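/*
 * The interrupt entry enters hardirq context by bumping the preempt count
 * in the current thread_info, extracts the vector number from the saved
 * format/vector word, and calls do_IRQ with the IRQ number and a pointer
 * to the pt_regs frame.  auto_irqhandler_fixup marks the operand of the
 * jsr so that platform setup code can patch in a different first-level
 * handler.
 */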
ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
auto_irqhandler_fixup = . + 2
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack

ret_from_interrupt:
	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
2:	RESTORE_ALL

	ALIGN
ret_from_last_interrupt:
	moveq	#(~ALLOWINT>>8)&0xff,%d0
	andb	%sp@(PT_OFF_SR),%d0
	jne	2b

	/* check if we need to do software interrupts */
	tstl	irq_stat+CPUSTAT_SOFTIRQ_PENDING
	jeq	.Lret_from_exception
	pea	ret_from_exception
	jra	do_softirq

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack

	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
	RESTORE_ALL

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)

	movel	%sp,%sp@-
	jsr	handle_badint
	addql	#4,%sp

	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
	RESTORE_ALL


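/*
 * The fork family needs the parent's complete register set so it can be
 * copied to the child: each wrapper saves a switch_stack below the
 * pt_regs frame (its return-pc slot is the wrapper's own return address)
 * and passes the resulting pointer to the C implementation.
 */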
ENTRY(sys_fork)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_fork
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_clone
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_vfork)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_vfork
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_sigreturn
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_rt_sigreturn
	RESTORE_SWITCH_STACK
	rts

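/*
 * resume is the low-level context switch used by switch_to(): it saves
 * the outgoing task's SR, fs, usp, callee-saved registers, kernel stack
 * pointer and (when present) FPU state into its thread structure, then
 * restores the same state for the incoming task, returning with the
 * previous task pointer in %d1.
 */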
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

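	/*
	 * Only dump the programmer-visible FPU registers when fsave left a
	 * non-null frame, i.e. the FPU actually holds live state; the 060
	 * keeps the null/non-null indication in a different byte of the
	 * frame, hence the two checks below.
	 */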
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts
