/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include "ppc_defs.h"

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

#ifdef SHOW_SYSCALLS_TASK
	.data
show_syscalls_task:
	.long	-1
#endif

/*
 * Handle a system call.
 */
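/*
 * For illustration only (not part of the original file): a minimal
 * sketch of what DoSyscall below expects from user space, assuming the
 * usual PowerPC Linux convention of the syscall number in r0 and the
 * arguments in r3-r8.  On return, r3 holds the result; the
 * summary-overflow bit that the error path below sets in the saved CR
 * signals failure, with the positive errno left in r3.  The syscall
 * number and operand values are hypothetical.
 *
 *	li	r0,4		# assumed to be __NR_write
 *	li	r3,1		# fd = stdout
 *	lis	r4,msg@ha	# buffer address
 *	addi	r4,r4,msg@l
 *	li	r5,5		# byte count
 *	sc			# trap into the kernel (DoSyscall)
 *	bns+	1f		# CR0[SO] clear: success, r3 = bytes written
 *	# CR0[SO] set: error, r3 = errno
 *1:
 */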
	.text
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"entry.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r0,THREAD+LAST_SYSCALL(r2)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	lis	r10,0x1000
	andc	r11,r11,r10
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
#ifdef SHOW_SYSCALLS_TASK
	lis	r31,show_syscalls_task@ha
	lwz	r31,show_syscalls_task@l(r31)
	cmp	0,r2,r31
	bne	1f
#endif
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	lwz	r5,GPR9(r1)
	mr	r6,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
1:
#endif /* SHOW_SYSCALLS */
	cmpi	0,r0,0x7777	/* Special case for 'sys_sigreturn' */
	beq-	10f
	cmpi    0,r0,0x6666     /* Special case for 'sys_rt_sigreturn' */
	beq-    16f
	lwz	r10,TASK_PTRACE(r2)
	andi.	r10,r10,PT_TRACESYS
	bne-	50f
	cmpli	0,r0,NR_syscalls
	bge-	66f
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	cmpi	0,r10,0
	beq-	66f
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl			/* Call handler */
	.globl	ret_from_syscall_1
ret_from_syscall_1:
20:	stw	r3,RESULT(r1)	/* Save result */
#ifdef SHOW_SYSCALLS
#ifdef SHOW_SYSCALLS_TASK
	cmp	0,r2,r31
	bne	91f
#endif
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
91:
#endif
	li	r10,-_LAST_ERRNO
	cmpl	0,r3,r10	/* unsigned compare: error if r3 is in -_LAST_ERRNO..-1 */
	blt	30f		/* not an error value */
	neg	r3,r3		/* make the errno positive */
	cmpi	0,r3,ERESTARTNOHAND
	bne	22f
	li	r3,EINTR	/* report ERESTARTNOHAND as EINTR */
22:	lwz	r10,_CCR(r1)	/* Set SO bit in CR */
	oris	r10,r10,0x1000
	stw	r10,_CCR(r1)
30:	stw	r3,GPR3(r1)	/* Update return value */
	b	ret_from_except
66:	li	r3,ENOSYS
	b	22b
/* sys_sigreturn */
10:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	sys_sigreturn
	cmpi    0,r3,0          /* Check for restarted system call */
	bge     ret_from_except
	b       20b
/* sys_rt_sigreturn */
16:	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      sys_rt_sigreturn
	cmpi	0,r3,0		/* Check for restarted system call */
	bge	ret_from_except
	b	20b
/* Traced system call support */
50:	bl	syscall_trace
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	lwz	r9,GPR9(r1)
	cmpli	0,r0,NR_syscalls
	bge-	66f
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	cmpi	0,r10,0
	beq-	66f
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl			/* Call handler */
	.globl	ret_from_syscall_2
ret_from_syscall_2:
	stw	r3,RESULT(r1)	/* Save result */
	li	r10,-_LAST_ERRNO
	cmpl	0,r3,r10
	blt	60f
	neg	r3,r3
	cmpi	0,r3,ERESTARTNOHAND
	bne	52f
	li	r3,EINTR
52:	lwz	r10,_CCR(r1)	/* Set SO bit in CR */
	oris	r10,r10,0x1000
	stw	r10,_CCR(r1)
60:	stw	r3,GPR3(r1)	/* Update return value */
	bl	syscall_trace
	b	ret_from_except
66:	li	r3,ENOSYS
	b	52b
#ifdef SHOW_SYSCALLS
7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x, %x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0
#endif

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
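/*
 * A rough sketch of the assumed C-side call (the real callers are
 * __switch_to() and the fork path set up by copy_thread() in
 * arch/ppc/kernel/process.c; the exact prototype shown here is an
 * assumption), included only to restate the register contract above:
 *
 *	last = _switch(old_thread, new_thread);
 *
 * i.e. r3 = old THREAD, r4 = new THREAD, and the value handed back in
 * r3 is the old 'current' task pointer copied from r2 below.
 */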
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	stw	r0,GPR0(r1)
	lwz	r0,0(r1)
	stw	r0,GPR1(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_GPR(2, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	stw	r20,INT_FRAME_SIZE+4(r1)
	mfmsr	r22
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22	/* FP or altivec enabled? */
	beq+	1f
	andc	r22,r22,r0
	mtmsr	r22
	isync
1:	stw	r20,_NIP(r1)
	stw	r22,_MSR(r1)
	stw	r20,_LINK(r1)
	mfcr	r20
	mfctr	r22
	mfspr	r23,XER
	stw	r20,_CCR(r1)
	stw	r22,_CTR(r1)
	stw	r23,_XER(r1)
	li	r0,0x0ff0
	stw	r0,TRAP(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */
	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */
	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r13 are destroyed -- Cort */
	REST_2GPRS(14, r1)
	REST_8GPRS(16, r1)
	REST_8GPRS(24, r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	syscall_direct_return
syscall_direct_return:
	addi	r1,r3,-STACK_FRAME_OVERHEAD
	lwz	r10,TASK_PTRACE(r2)
	andi.	r10,r10,PT_TRACESYS
	beq+	ret_from_except
	bl	syscall_trace
	b	ret_from_except

	.globl	ret_from_fork
ret_from_fork:
	bl	schedule_tail
	lwz	r0,TASK_PTRACE(r2)
	andi.	r0,r0,PT_TRACESYS
	bnel-	syscall_trace
	b	ret_from_except

	.globl	ret_from_intercept
ret_from_intercept:
	/*
	 * We may be returning from RTL and cannot do the normal checks
	 * -- Cort
	 */
	cmpi	0,r3,0
	beq	restore
	.globl	ret_from_except
ret_from_except:
	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq+	do_signal_ret	/* if not, skip the resched and signal checks */
	lwz	r3,NEED_RESCHED(r2)
	cmpi	0,r3,0		/* check need_resched flag */
	beq+	7f
	bl	schedule
7:	lwz	r5,SIGPENDING(r2) /* Check for pending unblocked signals */
	cmpwi	0,r5,0
	beq+	do_signal_ret
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	.globl	do_signal_ret
do_signal_ret:
	.globl ret_to_user_hook
ret_to_user_hook:
	nop
restore:
	lwz	r3,_XER(r1)
	mtspr	XER,r3
	REST_10GPRS(9,r1)
	REST_10GPRS(19,r1)
	REST_2GPRS(29,r1)
	REST_GPR(31,r1)

	/* make sure we hard disable here, even if rtl is active, to protect
	 * SRR[01] and SPRG2 -- Cort
	 */
	mfmsr	r0		/* Get current interrupt state */
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
#ifdef CONFIG_4xx
	rlwinm	r0,r0,0,23,21	/* clear MSR_DE in r0 */
#endif
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1		/* to clear the reservation */

	/* if returning to user mode, set new sprg2 and save kernel SP */
	lwz	r0,_MSR(r1)
	andi.	r0,r0,MSR_PR
	beq+	1f
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#if defined(CONFIG_4xx) && !defined(CONFIG_BDI_SWITCH)
	/* Restore the processor debugging state of the thread.  Only do
	 * this if we aren't using an Abatron BDI JTAG debugger.  It doesn't
	 * tolerate others mucking with the debug registers.
	 */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	mtspr	SPRN_DBCR0,r0
#endif
	addi	r0,r1,INT_FRAME_SIZE	/* size of frame */
	stw	r0,THREAD+KSP(r2)	/* save kernel stack pointer */
	tophys(r8,r1)
	CLR_TOP32(r8)
	mtspr	SPRG2,r8		/* phys exception stack pointer */
1:
	lwz	r3,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r3
	mtlr	r0
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

#ifndef CONFIG_SMP
	/* We have to "dummy" load from the context save area in case
	 * these instructions cause an MMU fault.  If this happens
	 * after we load SRR0/SRR1, our return context is hosed.  -- Dan
	 *
	 * This workaround is not enough; we must also make sure the
	 * actual code for this routine is in the TLB or BAT mapped.
	 * For 6xx/Power3, we know the code is in a BAT, so this should
	 * be enough in UP. In SMP, I limit lowmem to the amount of
	 * RAM that can be BAT mapped. Other CPUs may need additional
	 * tweaks, especially when used in SMP or when the code for this
	 * routine crosses page boundaries. The TLB pin-down on 4xx
	 * should help here, for example. --BenH.
	 */
	lwz	r0,GPR0(r1)
	lwz	r0,GPR2(r1)
	lwz	r0,GPR1(r1)
#endif /* ndef CONFIG_SMP */

	/* We re-use r3,r4 here (the load above was to cause the MMU
	 * fault if necessary).  Using r3,r4 removes the need to "dummy"
	 * load the CCR and NIP.  Since we load them we may as well
	 * use them.
	 */
	lwz	r3,_CCR(r1)
	lwz	r4,_NIP(r1)

	lwz	r0,_MSR(r1)
	FIX_SRR1(r0,r2)
	mtspr	SRR1,r0
	mtcrf	0xFF,r3
	mtspr	SRR0,r4
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r1,GPR1(r1)
	SYNC
	PPC405_ERR77_SYNC
	RFI


/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#if defined(CONFIG_ALL_PPC)
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
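/*
 * Hedged illustration of the assumed C-side usage (the exact prototype
 * is an assumption; see the CHRP platform code for the real callers):
 * the caller is taken to pass the physical address of an RTAS argument
 * block in r3, which is left untouched below, while rtas_entry and
 * rtas_data are fetched from global variables:
 *
 *	enter_rtas(__pa(&rtas_args));	# hypothetical call site
 *
 * The code clears MSR_IR/MSR_DR (and EE/FP) in the MSR image and
 * enters RTAS via RFI, coming back at label 1 with the MMU still off
 * until the final RFI restores the caller's MSR.
 */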
	.globl	enter_rtas
enter_rtas:
	mflr	r0
	stw	r0,20(r1)
	lis	r4,rtas_data@ha
	lwz	r4,rtas_data@l(r4)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	addis	r6,r6,-KERNELBASE@h
	subi	r7,r1,INT_FRAME_SIZE
	addis	r7,r7,-KERNELBASE@h
	lis	r8,rtas_entry@ha
	lwz	r8,rtas_entry@l(r8)
	mfmsr	r9
	stw	r9,8(r1)
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_FE0|MSR_FE1
	andc	r0,r9,r0
	li	r10,MSR_IR|MSR_DR|MSR_FP
	andc	r9,r0,r10
	SYNC			/* disable interrupts so SRR0/1 */
	mtmsr	r0		/* don't get trashed */
	mtlr	r6
	CLR_TOP32(r7)
	mtspr	SPRG2,r7
	mtspr	SRR0,r8
	mtspr	SRR1,r9
	RFI
1:	addis	r9,r1,-KERNELBASE@h
	lwz	r8,20(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	li	r0,0
	mtspr	SPRG2,r0
	mtspr	SRR0,r8
	mtspr	SRR1,r9
	RFI			/* return to caller */
#endif /* CONFIG_ALL_PPC */
