1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 *  Copyright (C) 1999,2000 Philipp Rumpf
6 *  Copyright (C) 1999 SuSE GmbH Nuernberg
7 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 *    This program is free software; you can redistribute it and/or modify
11 *    it under the terms of the GNU General Public License as published by
12 *    the Free Software Foundation; either version 2, or (at your option)
13 *    any later version.
14 *
15 *    This program is distributed in the hope that it will be useful,
16 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
17 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 *    GNU General Public License for more details.
19 *
20 *    You should have received a copy of the GNU General Public License
21 *    along with this program; if not, write to the Free Software
22 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <linux/config.h>
26#include <asm/offset.h>
27
28/* we have the following possibilities to act on an interruption:
29 *  - handle in assembly and use shadowed registers only
30 *  - save registers to kernel stack and handle in assembly or C */
31
32
33#include <asm/assembly.h>	/* for LDREG/STREG defines */
34#include <asm/pgtable.h>
35#include <asm/psw.h>
36#include <asm/signal.h>
37#include <asm/unistd.h>
38
39#ifdef __LP64__
40#define FRAME_SIZE	128
41#define CMPIB           cmpib,*
42#define CMPB            cmpb,*
43
44	.level 2.0w
45#else
46#define FRAME_SIZE	64
47#define CMPIB           cmpib,
48#define CMPB            cmpb,
49
50	.level 2.0
51#endif
52
53	.import         pa_dbit_lock,data
54
	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	/* Space id occupies the low bits: shift it into the protection-id
	 * position, zeroing the rest of the destination (depd,z). */
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	/* Space id is shifted by SPACEID_SHIFT: extract the significant
	 * bits (zero-extended) into the prot register instead. */
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
66
	/* Switch to virtual mapping, trashing only %r1.
	 * Zeroes sr4-sr7 (kernel space), saving the old sr7 in sr3 if it
	 * was non-zero (i.e. we came from user space), then loads
	 * KERNEL_PSW and "returns" through the local label 4 below via
	 * rfir so the new PSW takes effect. */
	.macro  virt_map
	rsm     PSW_SM_Q,%r0		/* stop interruption-state collection (Q bit) */
	tovirt_r1 %r29			/* make the pt_regs pointer in %r29 virtual */
	mfsp	%sr7, %r1
	or,=    %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
	mtsp	%r1, %sr3
	mtsp	%r0, %sr4		/* sr4-sr7 = 0 -> kernel space */
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7
	ldil	L%KERNEL_PSW, %r1
	ldo	R%KERNEL_PSW(%r1), %r1
	mtctl	%r1, %cr22		/* IPSW = KERNEL_PSW, loaded by rfir */
	mtctl	%r0, %cr17		/* clear IIASQ tail */
	mtctl	%r0, %cr17		/* clear IIASQ head (queue holds two entries) */
	ldil	L%4f, %r1
	ldo	R%4f(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail = 4f */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ head = 4f + 4 */
	rfir				/* resume at 4f with the new PSW */
	nop
4:
	.endm
92
93	/*
94	 * The "get_stack" macros are responsible for determining the
95	 * kernel stack value.
96	 *
97	 * For Faults:
98	 *      If sr7 == 0
99	 *          Already using a kernel stack, so call the
100	 *          get_stack_use_r30 macro to push a pt_regs structure
101	 *          on the stack, and store registers there.
102	 *      else
103	 *          Need to set up a kernel stack, so call the
104	 *          get_stack_use_cr30 macro to set up a pointer
105	 *          to the pt_regs structure contained within the
106	 *          task pointer pointed to by cr30. Set the stack
107	 *          pointer to point to the end of the task structure.
108	 *
109	 * For Interrupts:
110	 *      If sr7 == 0
111	 *          Already using a kernel stack, check to see if r30
112	 *          is already pointing to the per processor interrupt
113	 *          stack. If it is, call the get_stack_use_r30 macro
114	 *          to push a pt_regs structure on the stack, and store
115	 *          registers there. Otherwise, call get_stack_use_cr31
116	 *          to get a pointer to the base of the interrupt stack
117	 *          and push a pt_regs structure on that stack.
118	 *      else
119	 *          Need to set up a kernel stack, so call the
120	 *          get_stack_use_cr30 macro to set up a pointer
121	 *          to the pt_regs structure contained within the
122	 *          task pointer pointed to by cr30. Set the stack
123	 *          pointer to point to the end of the task structure.
124	 *          N.B: We don't use the interrupt stack for the
125	 *          first interrupt from userland, because signals/
126	 *          resched's are processed when returning to userland,
127	 *          and we can sleep in those cases.
128	 *
129	 * Note that we use shadowed registers for temps until
130	 * we can save %r26 and %r29. %r26 is used to preserve
131	 * %r8 (a shadowed register) which temporarily contained
132	 * either the fault type ("code") or the eirr. We need
133	 * to use a non-shadowed register to carry the value over
134	 * the rfir in virt_map. We use %r26 since this value winds
135	 * up being passed as the argument to either do_cpu_irq_mask
136	 * or handle_interruption. %r29 is used to hold a pointer
137	 * the register save area, and once again, it needs to
138	 * be a non-shadowed register so that it survives the rfir.
139	 *
140	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
141	 */
142
	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	mfctl   %cr30, %r1		/* cr30 = pointer to current task struct */
	tophys  %r1,%r9			/* %r9 = physical address of task struct */
	ldo     TASK_REGS(%r9),%r9	/* %r9 = &task->regs (pt_regs save area) */
	STREG   %r30, PT_GR30(%r9)	/* save the interrupted stack pointer */
	ldo	TASK_SZ_ALGN(%r1), %r30	/* kernel stack = end of task struct */
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29		/* %r29 = pt_regs; non-shadowed, survives rfir */
	.endm
156
	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9		/* %r9 = physical address of current %r30 */
	STREG   %r30,PT_GR30(%r9)	/* save the interrupted stack pointer */
	ldo	PT_SZ_ALGN(%r30),%r30	/* push a pt_regs-sized frame */
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29		/* %r29 = pt_regs; non-shadowed, survives rfir */
	.endm
168
	/* Undo get_stack_*: reload %r1, the stack pointer and %r29 from
	 * the pt_regs area pointed to by %r29 (restore %r29 last). */
	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm
174
	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8	/* delay slot: trap number for intr_save, in shadowed %r8 */
	.align	32		/* pad the vector entry to its 32-byte slot */
	.endm
182
	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask)
	 * Note: the \code argument is accepted but unused here. */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16	/* delay slot: %r16 = sr7 (0 => kernel context) */
	.align	32
	.endm
190
	.import	os_hpmc, code

	/* HPMC handler: branch to os_hpmc via its physical (PA) address.
	 * The trailing .words form the HPMC vector descriptor; the
	 * checksum and length fields are filled in at runtime. */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	ldil	L%PA(os_hpmc), %r3
	ldo	R%PA(os_hpmc)(%r3), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm
204
205	/*
206	 * Performance Note: Instructions will be moved up into
207	 * this part of the code later on, once we are sure
208	 * that the tlb miss handlers are close to final form.
209	 */
210
	/* Register definitions for tlb miss handler macros.
	 * Both are shadowed registers, so they survive until the
	 * miss handlers execute rfir. */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */
215
#ifndef __LP64__

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 * Faulting space/offset come from the pcsq/pcoq front elements.
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va	/* delay slot */

	.align		32
	.endm
#endif
231
232	/*
233	 * itlb miss interruption handler (parisc 2.0)
234	 */
235
236	.macro	itlb_20 code
237	mfctl	%pcsq, spc
238#ifdef __LP64__
239	b       itlb_miss_20w
240#else
241	b	itlb_miss_20
242#endif
243	mfctl	%pcoq, va
244
245	.align		32
246	.endm
247
#ifndef __LP64__
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR (not pcsq/pcoq), which is why isr/ior are read here.
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	itlb_miss_11
	mfctl 	%ior,va		/* delay slot */
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align		32
	.endm
#endif
271
272	/*
273	 * naitlb miss interruption handler (parisc 2.0)
274	 *
275	 * Note: naitlb misses will be treated
276	 * as an ordinary itlb miss for now.
277	 * However, note that naitlb misses
278	 * have the faulting address in the
279	 * IOR/ISR.
280	 */
281
282	.macro	naitlb_20 code
283
284	mfctl	%isr,spc
285#ifdef __LP64__
286	b       itlb_miss_20w
287#else
288	b	itlb_miss_20
289#endif
290	mfctl 	%ior,va
291	/* FIXME: If user causes a naitlb miss, the priv level may not be in
292	 * lower bits of va, where the itlb miss handler is expecting them
293	 */
294
295	.align		32
296	.endm
297
#ifndef __LP64__
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 * Faulting space/address arrive in isr/ior.
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va	/* delay slot */

	.align		32
	.endm
#endif
312
313	/*
314	 * dtlb miss interruption handler (parisc 2.0)
315	 */
316
317	.macro	dtlb_20 code
318
319	mfctl	%isr, spc
320#ifdef __LP64__
321	b       dtlb_miss_20w
322#else
323	b	dtlb_miss_20
324#endif
325	mfctl	%ior, va
326
327	.align		32
328	.endm
329
#ifndef __LP64__
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit)
	 * Non-access data TLB miss; space/address arrive in isr/ior. */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va		/* delay slot */

	.align		32
	.endm
#endif
342
	/* nadtlb miss interruption handler (parisc 2.0)
	 * Non-access data TLB miss; space/address arrive in isr/ior. */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef __LP64__
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va		/* delay slot */

	.align		32
	.endm
357
#ifndef __LP64__
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 * Faulting space/address arrive in isr/ior.
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va		/* delay slot */

	.align		32
	.endm
#endif
372
373	/*
374	 * dirty bit trap interruption handler (parisc 2.0)
375	 */
376
377	.macro	dbit_20 code
378
379	mfctl	%isr,spc
380#ifdef __LP64__
381	b       dbit_trap_20w
382#else
383	b	dbit_trap_20
384#endif
385	mfctl	%ior,va
386
387	.align		32
388	.endm
389
390	/*
391	 * Align fault_vector_20 on 4K boundary so that both
392	 * fault_vector_11 and fault_vector_20 are on the
393	 * same page. This is only necessary as long as we
394	 * write protect the kernel text, which we may stop
395	 * doing once we use large page translations to cover
396	 * the static part of the kernel address space.
397	 */
398
	.export fault_vector_20

	.text

	.align 4096

fault_vector_20:
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	/* One 32-byte entry per interruption number (see the vector
	 * macros above for what each expands to). */
	hpmc		 1	/* high-priority machine check */
	def		 2
	def		 3
	extint		 4	/* external interrupt */
	def		 5
	itlb_20		 6	/* instruction TLB miss */
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15	/* data TLB miss */
#if 0
	naitlb_20	16
#else
	def             16	/* non-access itlb miss: plain trap for now */
#endif
	nadtlb_20	17	/* non-access data TLB miss */
	def		18
	def		19
	dbit_20		20	/* TLB dirty bit trap */
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
446
#ifndef __LP64__

	.export fault_vector_11

	.align 2048

	/* PA 1.1 vector table; same layout as fault_vector_20 above but
	 * using the 1.1 (32-bit) miss handler macros. */
fault_vector_11:
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1	/* high-priority machine check */
	def		 2
	def		 3
	extint		 4	/* external interrupt */
	def		 5
	itlb_11		 6	/* instruction TLB miss */
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15	/* data TLB miss */
#if 0
	naitlb_11	16
#else
	def             16	/* non-access itlb miss: plain trap for now */
#endif
	nadtlb_11	17	/* non-access data TLB miss */
	def		18
	def		19
	dbit_11		20	/* TLB dirty bit trap */
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31

#endif
496
497	.import		handle_interruption,code
498	.import		handle_real_interruption,code
499	.import		do_cpu_irq_mask,code
500	.import		parisc_stopkernel,code
501
502	/*
503	 * r26 = function to be called
504	 * r25 = argument to pass in
505	 * r24 = flags for do_fork()
506	 *
507	 * Kernel threads don't ever return, so they don't need
508	 * a true register context. We just save away the arguments
509	 * for copy_thread/ret_ to properly set up the child.
510	 */
511
512#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
513
514	.export __kernel_thread, code
515	.import do_fork
516__kernel_thread:
517	STREG	%r2, -RP_OFFSET(%r30)
518
519	copy	%r30, %r1
520	ldo	PT_SZ_ALGN(%r30),%r30
521#ifdef __LP64__
522	/* Yo, function pointers in wide mode are little structs... -PB */
523	ldd	24(%r26), %r2
524	STREG	%r2, PT_GR27(%r1)	/* Store childs %dp */
525	ldd	16(%r26), %r26
526#endif
527	STREG	%r26, PT_GR26(%r1)  /* Store function & argument for child */
528	STREG	%r25, PT_GR25(%r1)
529	ldo	CLONE_VM(%r0), %r26   /* Force CLONE_VM since only init_mm */
530	or	%r26, %r24, %r26      /* will have kernel mappings.	 */
531	copy	%r0, %r25
532#ifdef __LP64__
533	ldo	-16(%r30),%r29		/* Reference param save area */
534#endif
535	bl	do_fork, %r2
536	copy	%r1, %r24
537
538	/* Parent Returns here */
539
540	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
541	bv	%r0(%r2)
542	ldo	-PT_SZ_ALGN(%r30), %r30
543
544	/*
545	 * Child Returns here
546	 *
547	 * copy_thread moved args from temp save area set up above
548	 * into task save area.
549	 */
550
551	.export	ret_from_kernel_thread
552ret_from_kernel_thread:
553
554	/* Call schedule_tail first though */
555	bl	schedule_tail, %r2
556	nop
557
558	LDREG	TASK_PT_GR26-TASK_SZ_ALGN(%r30), %r1
559	LDREG	TASK_PT_GR25-TASK_SZ_ALGN(%r30), %r26
560#ifdef __LP64__
561	LDREG	TASK_PT_GR27-TASK_SZ_ALGN(%r30), %r27
562#endif
563	ble	0(%sr7, %r1)
564	copy	%r31, %r2
565
566#ifdef __LP64__
567	ldo	-16(%r30),%r29		/* Reference param save area */
568	loadgp				/* Thread could have been in a module */
569#endif
570	b	sys_exit
571	ldi	0, %r26
572
	.import	sys_execve, code
	.export	__execve, code
	/* Kernel-internal execve: build a pt_regs frame on the stack,
	 * call sys_execve on it, and on success leave via the rfi
	 * return path (intr_return). */
__execve:
	copy	%r2, %r15		/* save return pointer */
	copy	%r30, %r16		/* %r16 = base of the new pt_regs frame */
	ldo	PT_SZ_ALGN(%r30), %r30
	STREG	%r26, PT_GR26(%r16)	/* stash the execve args in the frame */
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	bl	sys_execve, %r2
	copy	%r16, %r26		/* delay slot: arg0 = pt_regs */

	cmpib,=,n 0,%r28,intr_return    /* forward */

	/* error: pop the frame and return to the caller */
	/* yes, this will trap and die. */
	copy	%r15, %r2
	copy	%r16, %r30
	bv	%r0(%r2)
	nop
595
	.align 4

	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
	.export	_switch_to, code
_switch_to:
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save

	/* resume point for "prev" the next time it is switched back in */
	ldil	L%_switch_to_ret, %r2
	ldo	R%_switch_to_ret(%r2), %r2

	STREG	%r2, TASK_PT_KPC(%r26)	/* save prev's kernel PC ... */
	LDREG	TASK_PT_KPC(%r25), %r2	/* ... and load next's */

	STREG	%r30, TASK_PT_KSP(%r26)	/* likewise for the kernel SP */
	LDREG	TASK_PT_KSP(%r25), %r30
	bv	%r0(%r2)
	mtctl   %r25,%cr30		/* delay slot: cr30 = new current task */

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28		/* delay slot: return prev */
627
628	/*
629	 * Common rfi return path for interruptions, kernel execve, and
630	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
631	 * return via this path if the signal was received when the process
632	 * was running; if the process was blocked on a syscall then the
633	 * normal syscall_exit path is used.  All syscalls for traced
 * processes exit via intr_restore.
635	 *
 * XXX If any syscalls that change a process's space id ever exit
637	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
638	 * adjust IASQ[0..1].
639	 *
640	 * Note that the following code uses a "relied upon translation".
641	 * See the parisc ACD for details. The ssm is necessary due to a
642	 * PCXT bug.
643	 */
644
645	.align 4096
646
	.export	syscall_exit_rfi
syscall_exit_rfi:
	mfctl   %cr30,%r16		/* %r16 = current task */
	ldo	TASK_REGS(%r16),%r16	/* %r16 = current task's pt_regs */
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also Filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19		/* force privilege level 3 in iaoq */
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	ldil    L%USER_PSW_MASK,%r1
	ldo     R%USER_PSW_MASK(%r1),%r1
#ifdef __LP64__
	ldil    L%USER_PSW_HI_MASK,%r20
	ldo     R%USER_PSW_HI_MASK(%r20),%r20
	depd    %r20,31,32,%r1		/* merge in the upper 32 mask bits */
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	ldil    L%USER_PSW,%r1
	ldo     R%USER_PSW(%r1),%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page) Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)	/* sr2 is always restored as zero */
	mfsp    %sr3,%r19		/* sr3 = user space id (see note above
					 * the tlb miss handlers) */
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)
693
intr_return:
	/* Common return-to-interrupted-context path.  Callers arrive
	 * here with %r16 = pt_regs being restored. */
	ssm     PSW_SM_I, %r0		/* enable interrupts */

	/* Check for software interrupts */

	.import irq_stat,data

	ldil	L%irq_stat,%r19
	ldo	R%irq_stat(%r19),%r19
#ifdef CONFIG_SMP
	mfctl   %cr30,%r1
	ldw	TASK_PROCESSOR(%r1),%r1 /* get cpu # - int */
	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
	** irq_stat[] is defined using ____cacheline_aligned.
	*/
#ifdef __LP64__
	shld	%r1, 6, %r20
#else
	shlw	%r1, 5, %r20
#endif
	add     %r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */

	LDREG   IRQSTAT_SIRQ_PEND(%r19),%r20    /* hardirq.h: unsigned long */
	cmpib,<>,n 0,%r20,intr_do_softirq /* forward */

intr_check_resched:

	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG     TASK_NEED_RESCHED(%r1),%r19	/* sched.h: long need_resched */
	CMPIB<>,n 0,%r19,intr_do_resched /* forward */

intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	ldw	TASK_SIGPENDING(%r1),%r19	/* sched.h: int sigpending */
	cmpib,<>,n 0,%r19,intr_do_signal /* forward */

intr_restore:
	copy            %r16,%r29	/* %r29 = pt_regs to restore from */
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29
	/* nop padding around ssm/rsm/rfi is part of the "relied upon
	 * translation" sequence (see the note above syscall_exit_rfi);
	 * the ssm itself works around a PCXT bug. */
	ssm		0,%r0
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	tophys_r1       %r29
	rsm             (PSW_SM_Q|PSW_SM_P|PSW_SM_D|PSW_SM_I),%r0 /* ints/translation off */
	rest_specials	%r29
	rest_stack
	rfi				/* return to the interrupted context */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
759
	.import do_softirq,code
intr_do_softirq:
#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	/* tail-call do_softirq with %r2 aimed back at intr_check_resched */
	ldil	L%intr_check_resched, %r2
	b	do_softirq
	ldo	R%intr_check_resched(%r2), %r2	/* delay slot */
768
	.import schedule,code
intr_do_resched:
	/* Only do reschedule if we are returning to user space
	 * (iasq == 0 means a kernel address: skip to the signal check) */
	LDREG   PT_IASQ0(%r16), %r20
	CMPIB= 0,%r20,intr_check_sig /* backward */
	nop
	LDREG   PT_IASQ1(%r16), %r20
	CMPIB= 0,%r20,intr_check_sig /* backward */
	nop

#ifdef __LP64__
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* tail-call schedule with %r2 aimed back at intr_check_sig */
	ldil	L%intr_check_sig, %r2
	b	schedule
	ldo	R%intr_check_sig(%r2), %r2	/* delay slot */
786
787
	.import do_signal,code
intr_do_signal:
	/* Only do signals if we are returning to user space
	 * (iasq == 0 means a kernel address: go straight to restore) */
	LDREG   PT_IASQ0(%r16), %r20
	CMPIB= 0,%r20,intr_restore /* backward */
	nop
	LDREG   PT_IASQ1(%r16), %r20
	CMPIB= 0,%r20,intr_restore /* backward */
	nop

	copy	%r0, %r24			/* unsigned long in_syscall */
	copy	%r16, %r25			/* struct pt_regs *regs */
	copy	%r0, %r26			/* sigset_t *oldset = NULL */
#ifdef __LP64__
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif
	/* tail-call do_signal with %r2 aimed back at intr_restore */
	ldil	L%intr_restore, %r2
	b	do_signal
	ldo	R%intr_restore(%r2), %r2	/* delay slot */
807
808
809	/*
810	 * External interrupts.
811	 */
812
813intr_extint:
814	CMPIB=,n 0,%r16,1f	/* on User or kernel stack? */
815	get_stack_use_cr30
816	b,n 3f
817
8181:
819#if 0  /* Interrupt Stack support not working yet! */
820	mfctl	%cr31,%r1
821	copy	%r30,%r17
822	/* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
823#ifdef __LP64__
824	depdi	0,63,15,%r17
825#else
826	depi	0,31,15,%r17
827#endif
828	CMPB=,n	%r1,%r17,2f
829	get_stack_use_cr31
830	b,n 3f
831#endif
8322:
833	get_stack_use_r30
834
8353:
836	save_specials	%r29
837	virt_map
838	save_general	%r29
839
840	ldo	PT_FR0(%r29), %r24
841	save_fp	%r24
842
843	loadgp
844
845	copy	%r29, %r26	/* arg0 is pt_regs */
846	copy	%r29, %r16	/* save pt_regs */
847	ldil	L%intr_return, %r2
848#ifdef __LP64__
849	ldo	-16(%r30),%r29	/* Reference param save area */
850#endif
851	b	do_cpu_irq_mask
852	ldo	R%intr_return(%r2), %r2
853
854
855
	/* Generic interruptions (illegal insn, unaligned, page fault, etc)
	 * Entered from the "def" vector macro with the trap number in
	 * shadowed %r8; copied to %r26 so it survives the rfir in virt_map. */

	.export         intr_save, code /* for os_hpmc */

intr_save:
	mfsp    %sr7,%r16
	CMPIB=,n 0,%r16,1f	/* sr7 == 0 => already on a kernel stack */
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26	/* delay slot: carry the trap code in %r26 */

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *           traps.c.
	 *        2) Once we start executing code above 4 Gb, we need
	 *           to adjust iasq/iaoq here in the same way we
	 *           adjust isr/ior below.
	 */

	CMPIB=,n        6,%r26,skip_save_ior	/* 6 = itlb miss trap number */

	/* save_specials left ipsw value in r8 for us to test */

	mfctl           %cr20, %r16 /* isr */
	mfctl           %cr21, %r17 /* ior */

#ifdef __LP64__
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *        between space bits and offset bits. This will change
	 *        when we allow alternate page sizes.
	 */

	/* adjust isr/ior. */

	extrd,u         %r16,63,7,%r1    /* get high bits from isr for ior */
	depd            %r1,31,7,%r17    /* deposit them into ior */
	depdi           0,63,7,%r16      /* clear them from isr */
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)


skip_save_ior:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */

#ifdef __LP64__
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	/* tail-call handle_interruption(code=%r26, regs=%r25),
	 * returning to intr_check_sig */
	ldil		L%intr_check_sig, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_check_sig(%r2), %r2	/* delay slot */
935
936
937	/*
938	 * Note for all tlb miss handlers:
939	 *
940	 * cr24 contains a pointer to the kernel address space
941	 * page directory.
942	 *
943	 * cr25 contains a pointer to the current user address
944	 * space page directory.
945	 *
946	 * sr3 will contain the space id of the user address space
947	 * of the current running thread while that thread is
948	 * running in the kernel.
949	 */
950
951	/*
952	 * register number allocations.  Note that these are all
953	 * in the shadowed registers
954	 */
955
956	t0 = r1		/* temporary register 0 */
957	va = r8		/* virtual address for which the trap occured */
958	t1 = r9		/* temporary register 1 */
959	pte  = r16	/* pte/phys page # */
960	prot = r17	/* prot bits */
961	spc  = r24	/* space for which the trap occured */
962	ptp = r25	/* page directory/page table pointer */
963
964#ifdef __LP64__
965
dtlb_miss_20w:
	/* Wide (64-bit) data TLB miss: walk the three-level page table,
	 * set _PAGE_ACCESSED if it wasn't already, and insert the
	 * translation with idtlbt.  Non-present entries divert to
	 * dtlb_check_alias_20w; cross-space accesses go to dtlb_fault.
	 * Exits via rfir. */
	extrd,u         spc,63,7,t1     /* adjust va */
	depd            t1,31,7,va      /* adjust va */
	depdi           0,63,7,spc      /* adjust space */
	mfctl           %cr25,ptp	/* Assume user space miss */
	or,*<>          %r0,spc,%r0     /* If it is user space, nullify */
	mfctl           %cr24,ptp	/* Load kernel pgd instead */
	extrd,u         va,33,9,t1      /* Get pgd index */

	mfsp            %sr7,t0		/* Get current space */
	or,*=           %r0,t0,%r0      /* If kernel, nullify following test */
	cmpb,*<>,n       t0,spc,dtlb_fault /* forward */

	/* First level page table lookup */

	ldd,s           t1(ptp),ptp
	extrd,u         va,42,9,t0     /* get second-level index */
	bb,>=,n         ptp,_PAGE_PRESENT_BIT,dtlb_check_alias_20w
	depdi           0,63,12,ptp     /* clear prot bits */

	/* Second level page table lookup */

	ldd,s           t0(ptp),ptp
	extrd,u         va,51,9,t0     /* get third-level index */
	bb,>=,n         ptp,_PAGE_PRESENT_BIT,dtlb_check_alias_20w
	depdi           0,63,12,ptp     /* clear prot bits */

	/* Third level page table lookup */

	shladd           t0,3,ptp,ptp
	ldi		_PAGE_ACCESSED,t1
	ldd              0(ptp),pte
	bb,>=,n          pte,_PAGE_PRESENT_BIT,dtlb_check_alias_20w

	/* Check whether the "accessed" bit was set, otherwise do so */

	or		t1,pte,t0	/* t0 has R bit set */
	and,*<>         t1,pte,%r0      /* test and nullify if already set */
	std             t0,0(ptp)       /* write back pte */

	space_to_prot   spc prot        /* create prot id from space */
	depd            pte,8,7,prot    /* add in prot bits from pte */

	extrd,u,*=      pte,_PAGE_USER_BIT+32,1,r0
	depdi		7,11,3,prot   /* Set for user space (1 rsvd for read) */
	extrd,u,*= 	pte,_PAGE_GATEWAY_BIT+32,1,r0
	depdi		0,11,2,prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,52,pte
	idtlbt          pte,prot

	rfir
	nop
1022
dtlb_check_alias_20w:
	/* PTE not present: the access may still be to the kernel's
	 * temporary alias region (see pacache.S); anything else is a
	 * genuine fault. */

	/* Check to see if fault is in the temporary alias region */

	cmpib,*<>,n     0,spc,dtlb_fault /* forward */
	ldil            L%(TMPALIAS_MAP_START),t0
	copy            va,t1
	depdi           0,63,23,t1	/* mask va down to its region base */
	cmpb,*<>,n      t0,t1,dtlb_fault /* forward */
	ldi             (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
	depd,z          prot,8,7,prot

	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */

	extrd,u,*=      va,41,1,r0
	or,*tr          %r23,%r0,pte    /* If "from" use "from" page */
	or,*            %r26,%r0,pte    /* else "to", use "to" page  */

	idtlbt          pte,prot

	rfir
	nop
1048
nadtlb_miss_20w:
	/* Wide (64-bit) non-access data TLB miss: same page table walk
	 * as dtlb_miss_20w, but non-present entries go to
	 * nadtlb_check_flush_20w/nadtlb_emulate instead of faulting,
	 * and the accessed bit is not written back here.
	 * NOTE(review): t1 is loaded with _PAGE_ACCESSED below but never
	 * used in this path — mirrors dtlb_miss_20w; confirm intent. */
	extrd,u         spc,63,7,t1     /* adjust va */
	depd            t1,31,7,va      /* adjust va */
	depdi           0,63,7,spc      /* adjust space */
	mfctl           %cr25,ptp	/* Assume user space miss */
	or,*<>          %r0,spc,%r0     /* If it is user space, nullify */
	mfctl           %cr24,ptp	/* Load kernel pgd instead */
	extrd,u         va,33,9,t1      /* Get pgd index */

	mfsp            %sr7,t0		/* Get current space */
	or,*=           %r0,t0,%r0      /* If kernel, nullify following test */
	cmpb,*<>,n       t0,spc,nadtlb_fault /* forward */

	/* First level page table lookup */

	ldd,s           t1(ptp),ptp
	extrd,u         va,42,9,t0     /* get second-level index */
	bb,>=,n         ptp,_PAGE_PRESENT_BIT,nadtlb_emulate
	depdi           0,63,12,ptp     /* clear prot bits */

	/* Second level page table lookup */

	ldd,s           t0(ptp),ptp
	extrd,u         va,51,9,t0     /* get third-level index */
	bb,>=,n         ptp,_PAGE_PRESENT_BIT,nadtlb_emulate
	depdi           0,63,12,ptp     /* clear prot bits */

	/* Third level page table lookup */

	shladd           t0,3,ptp,ptp
	ldi		_PAGE_ACCESSED,t1
	ldd              0(ptp),pte
	bb,>=,n          pte,_PAGE_PRESENT_BIT,nadtlb_check_flush_20w

	space_to_prot   spc prot        /* create prot id from space */
	depd            pte,8,7,prot    /* add in prot bits from pte */

	extrd,u,*=      pte,_PAGE_USER_BIT+32,1,r0
	depdi		7,11,3,prot   /* Set for user space (1 rsvd for read) */
	extrd,u,*= 	pte,_PAGE_GATEWAY_BIT+32,1,r0
	depdi		0,11,2,prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,52,pte
	idtlbt          pte,prot

	rfir
	nop
1099
nadtlb_check_flush_20w:
	/* PTE not present: if _PAGE_FLUSH is set, the page is in the
	 * process of being flushed — insert a "flush only" translation
	 * so the access can complete; otherwise go emulate. */
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,52,pte
	idtlbt          pte,prot

	rfir
	nop
1116
1117#else
1118
dtlb_miss_11:
	/* 32-bit data TLB miss: walk the two-level page table, set
	 * _PAGE_ACCESSED if it wasn't already, and insert the
	 * translation with idtlba/idtlbp through %sr1.  Non-present
	 * entries divert to dtlb_check_alias_11; cross-space accesses
	 * go to dtlb_fault.  Exits via rfir. */
	mfctl           %cr25,ptp	/* Assume user space miss */
	or,<>           %r0,spc,%r0	/* If it is user space, nullify */
	mfctl           %cr24,ptp	/* Load kernel pgd instead */
	extru		va,9,10,t1	/* Get pgd index */

	mfsp            %sr7,t0		/* Get current space */
	or,=            %r0,t0,%r0	/* If kernel, nullify following test */
	cmpb,<>,n       t0,spc,dtlb_fault /* forward */

	/* First level page table lookup */

	ldwx,s		t1(ptp),ptp
	extru		va,19,10,t0	/* get second-level index */
	bb,>=,n         ptp,_PAGE_PRESENT_BIT,dtlb_check_alias_11
	depi		0,31,12,ptp	/* clear prot bits */

	/* Second level page table lookup */

	sh2addl 	 t0,ptp,ptp
	ldi		_PAGE_ACCESSED,t1
	ldw		 0(ptp),pte
	bb,>=,n          pte,_PAGE_PRESENT_BIT,dtlb_check_alias_11

	/* Check whether the "accessed" bit was set, otherwise do so */

	or		t1,pte,t0	/* t0 has R bit set */
	and,<>		t1,pte,%r0	/* test and nullify if already set */
	stw		t0,0(ptp)	/* write back pte */

	zdep            spc,30,15,prot  /* create prot id from space */
	dep             pte,8,7,prot    /* add in prot bits from pte */

	extru,=		pte,_PAGE_NO_CACHE_BIT,1,r0
	depi		1,12,1,prot	/* uncacheable if _PAGE_NO_CACHE */
	extru,=         pte,_PAGE_USER_BIT,1,r0
	depi		7,11,3,prot   /* Set for user space (1 rsvd for read) */
	extru,= 	pte,_PAGE_GATEWAY_BIT,1,r0
	depi		0,11,2,prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for idtlba */

	depi		0,31,12,pte
	extru		pte,24,25,pte

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1174
dtlb_check_alias_11:
	/* PTE not present: the access may still be to the kernel's
	 * temporary alias region (see pacache.S); anything else is a
	 * genuine fault. */

	/* Check to see if fault is in the temporary alias region */

	cmpib,<>,n      0,spc,dtlb_fault /* forward */
	ldil            L%(TMPALIAS_MAP_START),t0
	copy            va,t1
	depwi           0,31,23,t1	/* mask va down to its region base */
	cmpb,<>,n       t0,t1,dtlb_fault /* forward */
	ldi             (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
	depw,z          prot,8,7,prot

	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */

	extrw,u,=       va,9,1,r0
	or,tr           %r23,%r0,pte    /* If "from" use "from" page */
	or              %r26,%r0,pte    /* else "to", use "to" page  */

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop
1201
nadtlb_miss_11:
	/* 32-bit non-access data TLB miss: same page table walk as
	 * dtlb_miss_11, but non-present entries go to
	 * nadtlb_check_flush_11/nadtlb_emulate instead of faulting,
	 * and the accessed bit is not written back here.
	 * NOTE(review): t1 is loaded with _PAGE_ACCESSED below but never
	 * used in this path — mirrors dtlb_miss_11; confirm intent. */
	mfctl           %cr25,ptp	/* Assume user space miss */
	or,<>           %r0,spc,%r0	/* If it is user space, nullify */
	mfctl           %cr24,ptp	/* Load kernel pgd instead */
	extru		va,9,10,t1	/* Get pgd index */

	mfsp            %sr7,t0		/* Get current space */
	or,=            %r0,t0,%r0	/* If kernel, nullify following test */
	cmpb,<>,n       t0,spc,nadtlb_fault /* forward */

	/* First level page table lookup */

	ldwx,s		t1(ptp),ptp
	extru		va,19,10,t0	/* get second-level index */
	bb,>=,n         ptp,_PAGE_PRESENT_BIT,nadtlb_emulate
	depi		0,31,12,ptp	/* clear prot bits */

	/* Second level page table lookup */

	sh2addl 	 t0,ptp,ptp
	ldi		_PAGE_ACCESSED,t1
	ldw		 0(ptp),pte
	bb,>=,n          pte,_PAGE_PRESENT_BIT,nadtlb_check_flush_11

	zdep            spc,30,15,prot  /* create prot id from space */
	dep             pte,8,7,prot    /* add in prot bits from pte */

	extru,=		pte,_PAGE_NO_CACHE_BIT,1,r0
	depi		1,12,1,prot	/* uncacheable if _PAGE_NO_CACHE */
	extru,=         pte,_PAGE_USER_BIT,1,r0
	depi		7,11,3,prot   /* Set for user space (1 rsvd for read) */
	extru,= 	pte,_PAGE_GATEWAY_BIT,1,r0
	depi		0,11,2,prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for idtlba */

	depi		0,31,12,pte
	extru		pte,24,25,pte

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
1251
1252nadtlb_check_flush_11:
1253	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1254
1255	/* Insert a "flush only" translation */
1256
1257	zdepi           7,7,3,prot
1258	depi            1,10,1,prot
1259
1260	/* Get rid of prot bits and convert to page addr for idtlba */
1261
1262	depi		0,31,12,pte
1263	extru		pte,24,25,pte
1264
1265	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
1266	mtsp		spc,%sr1
1267
1268	idtlba		pte,(%sr1,va)
1269	idtlbp		prot,(%sr1,va)
1270
1271	mtsp		t0, %sr1	/* Restore sr1 */
1272
1273	rfir
1274	nop
1275
/*
 * dtlb_miss_20 (PA 2.0, 32-bit kernel): data TLB miss handler.  Walks the
 * two-level page table, sets the accessed bit if needed, builds the
 * protection word via space_to_prot, and inserts the translation with the
 * combined idtlbt instruction.  Non-present PTEs go to
 * dtlb_check_alias_20.
 */
1276dtlb_miss_20:
1277	mfctl           %cr25,ptp	/* Assume user space miss */
1278	or,<>           %r0,spc,%r0	/* If it is user space, nullify */
1279	mfctl           %cr24,ptp	/* Load kernel pgd instead */
1280	extru		va,9,10,t1	/* Get pgd index */
1281
1282	mfsp            %sr7,t0		/* Get current space */
1283	or,=            %r0,t0,%r0	/* If kernel, nullify following test */
1284	cmpb,<>,n       t0,spc,dtlb_fault /* forward */
1285
1286	/* First level page table lookup */
1287
1288	ldwx,s		t1(ptp),ptp
1289	extru		va,19,10,t0	/* get second-level index */
1290	bb,>=,n         ptp,_PAGE_PRESENT_BIT,dtlb_check_alias_20
1291	depi		0,31,12,ptp	/* clear prot bits */
1292
1293	/* Second level page table lookup */
1294
1295	sh2addl 	 t0,ptp,ptp
1296	ldi		_PAGE_ACCESSED,t1
1297	ldw		 0(ptp),pte
1298	bb,>=,n          pte,_PAGE_PRESENT_BIT,dtlb_check_alias_20
1299
1300	/* Check whether the "accessed" bit was set, otherwise do so */
1301
1302	or		t1,pte,t0	/* t0 has R bit set */
1303	and,<>		t1,pte,%r0	/* test and nullify if already set */
1304	stw		t0,0(ptp)	/* write back pte */
1305
1306	space_to_prot   spc prot        /* create prot id from space */
1307	depd            pte,8,7,prot    /* add in prot bits from pte */
1308
1309	extrd,u,*=      pte,_PAGE_USER_BIT+32,1,r0
1310	depdi		7,11,3,prot   /* Set for user space (1 rsvd for read) */
1311	extrd,u,*= 	pte,_PAGE_GATEWAY_BIT+32,1,r0
1312	depdi		0,11,2,prot	/* If Gateway, Set PL2 to 0 */
1313
1314	/* Get rid of prot bits and convert to page addr for idtlbt */
1315
	/* Sign-extension trick: if bits 27..30 of pte are all 1 (t1 == -1),
	 * take the sign-extending extract, else the zero-extending one.
	 * NOTE(review): presumably this preserves a sign-extended physical
	 * page number for high addresses -- confirm against PA 2.0 TLB
	 * insert format. */
1316	extrd,s         pte,35,4,t1
1317	depdi		0,63,12,pte	/* clear lower 12 bits */
1318        addi,=          1,t1,0
1319        extrd,u,*tr     pte,56,25,pte
1320	extrd,s		pte,56,25,pte	/* bit 31:8 >> 8  */
1321	idtlbt          pte,prot
1322
1323	rfir
1324	nop
1325
/*
 * dtlb_check_alias_20 (PA 2.0): same temporary-alias check as the PA 1.1
 * variant, but inserts via idtlbt.
 */
1326dtlb_check_alias_20:
1327
1328	/* Check to see if fault is in the temporary alias region */
1329
1330	cmpib,<>,n      0,spc,dtlb_fault /* forward */
1331	ldil            L%(TMPALIAS_MAP_START),t0
1332	copy            va,t1
1333	depwi           0,31,23,t1	/* clear low 23 bits of va for region compare */
1334	cmpb,<>,n       t0,t1,dtlb_fault /* forward */
1335	ldi             (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
1336	depd,z          prot,8,7,prot
1337
1338	/*
1339	 * OK, it is in the temp alias region, check whether "from" or "to".
1340	 * Check "subtle" note in pacache.S re: r23/r26.
1341	 */
1342
1343	extrw,u,=       va,9,1,r0	/* test the from/to selector bit in va */
1344	or,tr           %r23,%r0,pte    /* If "from" use "from" page */
1345	or              %r26,%r0,pte    /* else "to", use "to" page  */
1346
1347	idtlbt          pte,prot
1348
1349	rfir
1350	nop
1351
/*
 * nadtlb_miss_20 (PA 2.0, 32-bit kernel): non-access data TLB miss
 * handler.  Same walk as dtlb_miss_20, but non-present pages divert to
 * nadtlb_emulate / nadtlb_check_flush_20 rather than faulting, and the
 * accessed bit is not updated (non-access references).
 */
1352nadtlb_miss_20:
1353	mfctl           %cr25,ptp	/* Assume user space miss */
1354	or,<>           %r0,spc,%r0	/* If it is user space, nullify */
1355	mfctl           %cr24,ptp	/* Load kernel pgd instead */
1356	extru		va,9,10,t1	/* Get pgd index */
1357
1358	mfsp            %sr7,t0		/* Get current space */
1359	or,=            %r0,t0,%r0	/* If kernel, nullify following test */
1360	cmpb,<>,n       t0,spc,nadtlb_fault /* forward */
1361
1362	/* First level page table lookup */
1363
1364	ldwx,s		t1(ptp),ptp
1365	extru		va,19,10,t0	/* get second-level index */
1366	bb,>=,n         ptp,_PAGE_PRESENT_BIT,nadtlb_emulate
1367	depi		0,31,12,ptp	/* clear prot bits */
1368
1369	/* Second level page table lookup */
1370
1371	sh2addl 	 t0,ptp,ptp
1372	ldi		_PAGE_ACCESSED,t1
1373	ldw		 0(ptp),pte
1374	bb,>=,n          pte,_PAGE_PRESENT_BIT,nadtlb_check_flush_20
1375
1376	space_to_prot   spc prot        /* create prot id from space */
1377	depd            pte,8,7,prot    /* add in prot bits from pte */
1378
1379	extrd,u,*=      pte,_PAGE_USER_BIT+32,1,r0
1380	depdi		7,11,3,prot   /* Set for user space (1 rsvd for read) */
1381	extrd,u,*= 	pte,_PAGE_GATEWAY_BIT+32,1,r0
1382	depdi		0,11,2,prot	/* If Gateway, Set PL2 to 0 */
1383
1384	/* Get rid of prot bits and convert to page addr for idtlbt */
1385
	/* Same signed/unsigned extract selection as dtlb_miss_20. */
1386        extrd,s         pte,35,4,t1
1387        depdi           0,63,12,pte     /* clear lower 12 bits */
1388        addi,=          1,t1,0
1389        extrd,u,*tr     pte,56,25,pte
1390        extrd,s         pte,56,25,pte   /* bit 31:8 >> 8  */
1391        idtlbt          pte,prot
1392
1393	rfir
1394	nop
1395
/*
 * nadtlb_check_flush_20: PTE not present -- if flush-only, insert a
 * "flush only" translation via idtlbt; otherwise emulate the instruction.
 */
1396nadtlb_check_flush_20:
1397	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1398
1399	/* Insert a "flush only" translation */
1400
1401	depdi,z         7,7,3,prot
1402	depdi           1,10,1,prot
1403
1404	/* Get rid of prot bits and convert to page addr for idtlbt */
1405
1406	depdi		0,63,12,pte
1407	extrd,u         pte,56,32,pte
1408	idtlbt          pte,prot
1409
1410	rfir
1411	nop
1412#endif
1413
1414nadtlb_emulate:
1415
1416	/*
1417	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1418	 * probei instructions. We don't want to fault for these
1419	 * instructions (not only does it not make sense, it can cause
1420	 * deadlocks, since some flushes are done with the mmap
1421	 * semaphore held). If the translation doesn't exist, we can't
1422	 * insert a translation, so have to emulate the side effects
1423	 * of the instruction. Since we don't insert a translation
1424	 * we can get a lot of faults during a flush loop, so it makes
1425	 * sense to try to do it here with minimum overhead. We only
1426	 * emulate fdc,fic & pdc instructions whose base and index
1427	 * registers are not shadowed. We defer everything else to the
1428	 * "slow" path.
1429	 */
1430
1431	mfctl           %cr19,%r9 /* Get iir */
1432	ldi             0x280,%r16	/* mask identifying fdc/fic/pdc opcodes */
1433	and             %r9,%r16,%r17
1434	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Not fdc,fic,pdc */
1435	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
1436	b,l             get_register,%r25	/* returns reg value in %r1, or -1 if shadowed */
1437	extrw,u         %r9,15,5,%r8           /* Get index register # */
1438	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
1439	copy            %r1,%r24
1440	b,l             get_register,%r25
1441	extrw,u         %r9,10,5,%r8           /* Get base register # */
1442	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
1443	b,l             set_register,%r25	/* write base+index back (base modify) */
1444	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */
1445
/*
 * nadtlb_nullify: skip the faulting instruction by setting PSW_N in the
 * saved IPSW, so rfir resumes with it nullified.
 */
1446nadtlb_nullify:
1447	mfctl           %cr22,%r8              /* Get ipsw */
1448	ldil            L%PSW_N,%r9
1449	or              %r8,%r9,%r8            /* Set PSW_N */
1450	mtctl           %r8,%cr22
1451
1452	rfir
1453	nop
1454
1455#ifdef __LP64__
/*
 * itlb_miss_20w (PA 2.0 wide / 64-bit): instruction TLB miss handler.
 * Three-level page table walk; inserts via iitlbt.  Kernel-space misses
 * (spc == 0) join at itlb_miss_common_20w with the kernel pgd, since
 * users may legitimately fault on the gateway page.
 */
1456itlb_miss_20w:
1457
1458	/*
1459	 * I miss is a little different, since we allow users to fault
1460	 * on the gateway page which is in the kernel address space.
1461	 */
1462
1463	extrd,u         spc,63,7,t1     /* adjust va */
1464	depd            t1,31,7,va      /* adjust va */
1465	depdi           0,63,7,spc      /* adjust space */
1466	cmpib,*=        0,spc,itlb_miss_kernel_20w
1467	extrd,u         va,33,9,t1      /* Get pgd index */
1468
1469	mfctl           %cr25,ptp	/* load user pgd */
1470
1471	mfsp            %sr7,t0		/* Get current space */
1472	or,*=           %r0,t0,%r0      /* If kernel, nullify following test */
1473	cmpb,*<>,n      t0,spc,itlb_fault /* forward */
1474
1475	/* First level page table lookup */
1476
1477itlb_miss_common_20w:
1478	ldd,s           t1(ptp),ptp
1479	extrd,u         va,42,9,t0     /* get second-level index */
1480	bb,>=,n 	ptp,_PAGE_PRESENT_BIT,itlb_fault
1481	depdi           0,63,12,ptp     /* clear prot bits */
1482
1483	/* Second level page table lookup */
1484
1485	ldd,s           t0(ptp),ptp
1486	extrd,u         va,51,9,t0     /* get third-level index */
1487	bb,>=,n         ptp,_PAGE_PRESENT_BIT,itlb_fault
1488	depdi           0,63,12,ptp     /* clear prot bits */
1489
1490	/* Third level page table lookup */
1491
1492	shladd           t0,3,ptp,ptp
1493	ldi		_PAGE_ACCESSED,t1
1494	ldd              0(ptp),pte
1495	bb,>=,n          pte,_PAGE_PRESENT_BIT,itlb_fault
1496
1497	/* Check whether the "accessed" bit was set, otherwise do so */
1498
1499	or		t1,pte,t0	/* t0 has R bit set */
1500	and,*<>         t1,pte,%r0      /* test and nullify if already set */
1501	std             t0,0(ptp)       /* write back pte */
1502
1503	space_to_prot   spc prot        /* create prot id from space */
1504	depd            pte,8,7,prot    /* add in prot bits from pte */
1505
1506	extrd,u,*=      pte,_PAGE_USER_BIT+32,1,r0
1507	depdi		7,11,3,prot   /* Set for user space (1 rsvd for read) */
1508	extrd,u,*= 	pte,_PAGE_GATEWAY_BIT+32,1,r0
1509	depdi		0,11,2,prot	/* If Gateway, Set PL2 to 0 */
1510
1511	/* Get rid of prot bits and convert to page addr for iitlbt */
1512
1513	depdi		0,63,12,pte
1514	extrd,u         pte,56,32,pte	/* NOTE(review): 32-bit extract here vs 52 bits in dbit_trap_20w -- confirm intended width */
1515	iitlbt          pte,prot
1516
1517	rfir
1518	nop
1519
/* Kernel-space miss: use the kernel pgd and rejoin the common walk. */
1520itlb_miss_kernel_20w:
1521	b               itlb_miss_common_20w
1522	mfctl           %cr24,ptp	/* Load kernel pgd */
1523#else
1524
/*
 * itlb_miss_11 (PA 1.1): instruction TLB miss handler.  Two-level page
 * table walk; inserts via iitlba/iitlbp using %sr1.  Kernel-space misses
 * join at itlb_miss_common_11 with the kernel pgd.
 */
1525itlb_miss_11:
1526
1527	/*
1528	 * I miss is a little different, since we allow users to fault
1529	 * on the gateway page which is in the kernel address space.
1530	 */
1531
1532	cmpib,=         0,spc,itlb_miss_kernel_11
1533	extru		va,9,10,t1	/* Get pgd index */
1534
1535	mfctl           %cr25,ptp	/* load user pgd */
1536
1537	mfsp            %sr7,t0		/* Get current space */
1538	or,=            %r0,t0,%r0	/* If kernel, nullify following test */
1539	cmpb,<>,n       t0,spc,itlb_fault /* forward */
1540
1541	/* First level page table lookup */
1542
1543itlb_miss_common_11:
1544	ldwx,s		t1(ptp),ptp
1545	extru		va,19,10,t0	/* get second-level index */
1546	bb,>=,n 	ptp,_PAGE_PRESENT_BIT,itlb_fault
1547	depi		0,31,12,ptp	/* clear prot bits */
1548
1549	/* Second level page table lookup */
1550
1551	sh2addl 	 t0,ptp,ptp
1552	ldi		_PAGE_ACCESSED,t1
1553	ldw		 0(ptp),pte
1554	bb,>=,n 	 pte,_PAGE_PRESENT_BIT,itlb_fault
1555
1556	/* Check whether the "accessed" bit was set, otherwise do so */
1557
1558	or		t1,pte,t0	/* t0 has R bit set */
1559	and,<>		t1,pte,%r0	/* test and nullify if already set */
1560	stw		t0,0(ptp)	/* write back pte */
1561
1562	zdep            spc,30,15,prot  /* create prot id from space */
1563	dep             pte,8,7,prot    /* add in prot bits from pte */
1564
1565	extru,=		pte,_PAGE_NO_CACHE_BIT,1,r0
1566	depi		1,12,1,prot	/* set uncacheable bit if _PAGE_NO_CACHE */
1567	extru,=         pte,_PAGE_USER_BIT,1,r0
1568	depi		7,11,3,prot   /* Set for user space (1 rsvd for read) */
1569	extru,= 	pte,_PAGE_GATEWAY_BIT,1,r0
1570	depi		0,11,2,prot	/* If Gateway, Set PL2 to 0 */
1571
1572	/* Get rid of prot bits and convert to page addr for iitlba */
1573
1574	depi		0,31,12,pte
1575	extru		pte,24,25,pte
1576
1577	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
1578	mtsp		spc,%sr1
1579
1580	iitlba		pte,(%sr1,va)
1581	iitlbp		prot,(%sr1,va)
1582
1583	mtsp		t0, %sr1	/* Restore sr1 */
1584
1585	rfir
1586	nop
1587
/* Kernel-space miss: use the kernel pgd and rejoin the common walk. */
1588itlb_miss_kernel_11:
1589	b               itlb_miss_common_11
1590	mfctl           %cr24,ptp	/* Load kernel pgd */
1591
/*
 * itlb_miss_20 (PA 2.0, 32-bit kernel): instruction TLB miss handler.
 * Two-level page table walk; inserts via iitlbt.  Kernel-space misses
 * join at itlb_miss_common_20 with the kernel pgd.
 */
1592itlb_miss_20:
1593
1594	/*
1595	 * I miss is a little different, since we allow users to fault
1596	 * on the gateway page which is in the kernel address space.
1597	 */
1598
1599	cmpib,=         0,spc,itlb_miss_kernel_20
1600	extru		va,9,10,t1	/* Get pgd index */
1601
1602	mfctl           %cr25,ptp	/* load user pgd */
1603
1604	mfsp            %sr7,t0		/* Get current space */
1605	or,=            %r0,t0,%r0	/* If kernel, nullify following test */
1606	cmpb,<>,n       t0,spc,itlb_fault /* forward */
1607
1608	/* First level page table lookup */
1609
1610itlb_miss_common_20:
1611	ldwx,s		t1(ptp),ptp
1612	extru		va,19,10,t0	/* get second-level index */
1613	bb,>=,n 	ptp,_PAGE_PRESENT_BIT,itlb_fault
1614	depi		0,31,12,ptp	/* clear prot bits */
1615
1616	/* Second level page table lookup */
1617
1618	sh2addl 	 t0,ptp,ptp
1619	ldi		_PAGE_ACCESSED,t1
1620	ldw		 0(ptp),pte
1621	bb,>=,n 	 pte,_PAGE_PRESENT_BIT,itlb_fault
1622
1623	/* Check whether the "accessed" bit was set, otherwise do so */
1624
1625	or		t1,pte,t0	/* t0 has R bit set */
1626	and,<>		t1,pte,%r0	/* test and nullify if already set */
1627	stw		t0,0(ptp)	/* write back pte */
1628
1629	space_to_prot   spc prot        /* create prot id from space */
1630	depd            pte,8,7,prot    /* add in prot bits from pte */
1631
1632	extrd,u,*=      pte,_PAGE_USER_BIT+32,1,r0
1633	depdi		7,11,3,prot   /* Set for user space (1 rsvd for read) */
1634	extrd,u,*= 	pte,_PAGE_GATEWAY_BIT+32,1,r0
1635	depdi		0,11,2,prot	/* If Gateway, Set PL2 to 0 */
1636
1637	/* Get rid of prot bits and convert to page addr for iitlbt */
1638
	/* Same signed/unsigned extract selection as dtlb_miss_20. */
1639        extrd,s         pte,35,4,t1
1640        depdi           0,63,12,pte     /* clear lower 12 bits */
1641        addi,=          1,t1,0
1642        extrd,u,*tr     pte,56,25,pte
1643        extrd,s         pte,56,25,pte   /* bit 31:8 >> 8  */
1644	iitlbt          pte,prot
1645
1646	rfir
1647	nop
1648
1649
/* Kernel-space miss: use the kernel pgd and rejoin the common walk. */
1650itlb_miss_kernel_20:
1651	b               itlb_miss_common_20
1652	mfctl           %cr24,ptp	/* Load kernel pgd */
1653#endif
1654
1655#ifdef __LP64__
1656
/*
 * dbit_trap_20w (PA 2.0 wide / 64-bit): TLB dirty-bit trap handler.
 * Re-walks the three-level page table, sets ACCESSED|DIRTY in the PTE
 * (under pa_dbit_lock on SMP for user-space entries), and reinserts the
 * translation via idtlbt.
 */
1657dbit_trap_20w:
1658	extrd,u         spc,63,7,t1     /* adjust va */
1659	depd            t1,31,7,va      /* adjust va */
1660	depdi           0,1,2,va        /* adjust va */
1661	depdi           0,63,7,spc      /* adjust space */
1662	mfctl           %cr25,ptp	/* Assume user space miss */
1663	or,*<>          %r0,spc,%r0     /* If it is user space, nullify */
1664	mfctl           %cr24,ptp	/* Load kernel pgd instead */
1665	extrd,u         va,33,9,t1      /* Get pgd index */
1666
1667	mfsp            %sr7,t0		/* Get current space */
1668	or,*=           %r0,t0,%r0      /* If kernel, nullify following test */
1669	cmpb,*<>,n       t0,spc,dbit_fault /* forward */
1670
1671	/* First level page table lookup */
1672
1673	ldd,s           t1(ptp),ptp
1674	extrd,u         va,42,9,t0     /* get second-level index */
1675	bb,>=,n         ptp,_PAGE_PRESENT_BIT,dbit_fault
1676	depdi           0,63,12,ptp     /* clear prot bits */
1677
1678	/* Second level page table lookup */
1679
1680	ldd,s           t0(ptp),ptp
1681	extrd,u         va,51,9,t0     /* get third-level index */
1682	bb,>=,n         ptp,_PAGE_PRESENT_BIT,dbit_fault
1683	depdi           0,63,12,ptp     /* clear prot bits */
1684
1685	/* Third level page table lookup */
1686
1687	shladd           t0,3,ptp,ptp
1688#ifdef CONFIG_SMP
	/* Serialize PTE dirty-bit updates: spin on pa_dbit_lock (ldcw
	 * returns 0 while the lock is held).  Kernel space (spc == 0)
	 * skips the lock. */
1689	CMPIB=,n        0,spc,dbit_nolock_20w
1690	ldil            L%PA(pa_dbit_lock),t0
1691	ldo             R%PA(pa_dbit_lock)(t0),t0
1692
1693dbit_spin_20w:
1694	ldcw            0(t0),t1
1695	cmpib,=         0,t1,dbit_spin_20w
1696	nop
1697
1698dbit_nolock_20w:
1699#endif
1700	ldi		(_PAGE_ACCESSED|_PAGE_DIRTY),t1
1701	ldd              0(ptp),pte
1702	bb,>=,n          pte,_PAGE_PRESENT_BIT,dbit_fault
1703
1704	/* Set Accessed and Dirty bits in the pte */
1705
1706	or		t1,pte,pte
1707	std             pte,0(ptp)      /* write back pte */
1708
1709	space_to_prot   spc prot        /* create prot id from space */
1710	depd            pte,8,7,prot    /* add in prot bits from pte */
1711
1712	extrd,u,*=      pte,_PAGE_USER_BIT+32,1,r0
1713	depdi		7,11,3,prot   /* Set for user space (1 rsvd for read) */
1714	extrd,u,*= 	pte,_PAGE_GATEWAY_BIT+32,1,r0
1715	depdi		0,11,2,prot	/* If Gateway, Set PL2 to 0 */
1716
1717	/* Get rid of prot bits and convert to page addr for idtlbt */
1718
1719	depdi		0,63,12,pte
1720	extrd,u         pte,56,52,pte	/* NOTE(review): 52-bit extract here vs 32 bits in itlb_miss_20w -- confirm intended width */
1721	idtlbt          pte,prot
1722#ifdef CONFIG_SMP
	/* Release pa_dbit_lock by storing a nonzero value. */
1723	CMPIB=,n        0,spc,dbit_nounlock_20w
1724	ldi             1,t1
1725	stw             t1,0(t0)
1726
1727dbit_nounlock_20w:
1728#endif
1729
1730	rfir
1731	nop
1732#else
1733
/*
 * dbit_trap_11 (PA 1.1): TLB dirty-bit trap handler.  Re-walks the
 * two-level page table, sets ACCESSED|DIRTY in the PTE (under
 * pa_dbit_lock on SMP for user-space entries), and reinserts the
 * translation via idtlba/idtlbp.
 */
1734dbit_trap_11:
1735	mfctl           %cr25,ptp	/* Assume user space trap */
1736	or,<>           %r0,spc,%r0	/* If it is user space, nullify */
1737	mfctl           %cr24,ptp	/* Load kernel pgd instead */
1738	extru		va,9,10,t1	/* Get pgd index */
1739
1740	mfsp            %sr7,t0		/* Get current space */
1741	or,=            %r0,t0,%r0	/* If kernel, nullify following test */
1742	cmpb,<>,n       t0,spc,dbit_fault /* forward */
1743
1744	/* First level page table lookup */
1745
1746	ldwx,s		t1(ptp),ptp
1747	extru		va,19,10,t0	/* get second-level index */
1748	bb,>=,n 	ptp,_PAGE_PRESENT_BIT,dbit_fault
1749	depi		0,31,12,ptp	/* clear prot bits */
1750
1751	/* Second level page table lookup */
1752
1753	sh2addl 	 t0,ptp,ptp
1754#ifdef CONFIG_SMP
	/* Serialize PTE dirty-bit updates via pa_dbit_lock (ldcw spins
	 * while it reads 0).  Kernel space (spc == 0) skips the lock. */
1755	CMPIB=,n        0,spc,dbit_nolock_11
1756	ldil            L%PA(pa_dbit_lock),t0
1757	ldo             R%PA(pa_dbit_lock)(t0),t0
1758
1759dbit_spin_11:
1760	ldcw            0(t0),t1
1761	cmpib,=         0,t1,dbit_spin_11
1762	nop
1763
1764dbit_nolock_11:
1765#endif
1766	ldi		(_PAGE_ACCESSED|_PAGE_DIRTY),t1
1767	ldw		 0(ptp),pte
1768	bb,>=,n 	 pte,_PAGE_PRESENT_BIT,dbit_fault
1769
1770	/* Set Accessed and Dirty bits in the pte */
1771
1772	or		t1,pte,pte
1773	stw		pte,0(ptp)	/* write back pte */
1774
1775	zdep            spc,30,15,prot  /* create prot id from space */
1776	dep             pte,8,7,prot    /* add in prot bits from pte */
1777
1778	extru,=		pte,_PAGE_NO_CACHE_BIT,1,r0
1779	depi		1,12,1,prot	/* set uncacheable bit if _PAGE_NO_CACHE */
1780	extru,=         pte,_PAGE_USER_BIT,1,r0
1781	depi		7,11,3,prot /* Set for user space (1 rsvd for read) */
1782	extru,= 	pte,_PAGE_GATEWAY_BIT,1,r0
1783	depi		0,11,2,prot	/* If Gateway, Set PL2 to 0 */
1784
1785	/* Get rid of prot bits and convert to page addr for idtlba */
1786
1787	depi		0,31,12,pte
1788	extru		pte,24,25,pte
1789
1790	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
1791	mtsp		spc,%sr1
1792
1793	idtlba		pte,(%sr1,va)
1794	idtlbp		prot,(%sr1,va)
1795
1796	mtsp            t1, %sr1     /* Restore sr1 */
1797#ifdef CONFIG_SMP
	/* Release pa_dbit_lock by storing a nonzero value. */
1798	CMPIB=,n        0,spc,dbit_nounlock_11
1799	ldi             1,t1
1800	stw             t1,0(t0)
1801
1802dbit_nounlock_11:
1803#endif
1804
1805	rfir
1806	nop
1807
/*
 * dbit_trap_20 (PA 2.0, 32-bit kernel): TLB dirty-bit trap handler.
 * Same as dbit_trap_11 but builds the wide protection word and inserts
 * via the combined idtlbt instruction.
 */
1808dbit_trap_20:
1809	mfctl           %cr25,ptp	/* Assume user space trap */
1810	or,<>           %r0,spc,%r0	/* If it is user space, nullify */
1811	mfctl           %cr24,ptp	/* Load kernel pgd instead */
1812	extru		va,9,10,t1	/* Get pgd index */
1813
1814	mfsp            %sr7,t0		/* Get current space */
1815	or,=            %r0,t0,%r0	/* If kernel, nullify following test */
1816	cmpb,<>,n       t0,spc,dbit_fault /* forward */
1817
1818	/* First level page table lookup */
1819
1820	ldwx,s		t1(ptp),ptp
1821	extru		va,19,10,t0	/* get second-level index */
1822	bb,>=,n 	ptp,_PAGE_PRESENT_BIT,dbit_fault
1823	depi		0,31,12,ptp	/* clear prot bits */
1824
1825	/* Second level page table lookup */
1826
1827	sh2addl 	 t0,ptp,ptp
1828#ifdef CONFIG_SMP
	/* Serialize PTE dirty-bit updates via pa_dbit_lock (ldcw spins
	 * while it reads 0).  Kernel space (spc == 0) skips the lock. */
1829	CMPIB=,n        0,spc,dbit_nolock_20
1830	ldil            L%PA(pa_dbit_lock),t0
1831	ldo             R%PA(pa_dbit_lock)(t0),t0
1832
1833dbit_spin_20:
1834	ldcw            0(t0),t1
1835	cmpib,=         0,t1,dbit_spin_20
1836	nop
1837
1838dbit_nolock_20:
1839#endif
1840	ldi		(_PAGE_ACCESSED|_PAGE_DIRTY),t1
1841	ldw		 0(ptp),pte
1842	bb,>=,n 	 pte,_PAGE_PRESENT_BIT,dbit_fault
1843
1844	/* Set Accessed and Dirty bits in the pte */
1845
1846	or		t1,pte,pte
1847	stw		pte,0(ptp)	/* write back pte */
1848
1849	space_to_prot   spc prot        /* create prot id from space */
1850	depd            pte,8,7,prot    /* add in prot bits from pte */
1851
1852	extrd,u,*=      pte,_PAGE_USER_BIT+32,1,r0
1853	depdi		7,11,3,prot   /* Set for user space (1 rsvd for read) */
1854	extrd,u,*= 	pte,_PAGE_GATEWAY_BIT+32,1,r0
1855	depdi		0,11,2,prot	/* If Gateway, Set PL2 to 0 */
1856
	/* Same signed/unsigned extract selection as dtlb_miss_20. */
1857        extrd,s         pte,35,4,t1
1858        depdi           0,63,12,pte     /* clear lower 12 bits */
1859        addi,=          1,t1,0
1860        extrd,u,*tr     pte,56,25,pte
1861        extrd,s         pte,56,25,pte   /* bit 31:8 >> 8  */
1862        idtlbt          pte,prot
1863
1864#ifdef CONFIG_SMP
	/* Release pa_dbit_lock by storing a nonzero value. */
1865	CMPIB=,n        0,spc,dbit_nounlock_20
1866	ldi             1,t1
1867	stw             t1,0(t0)
1868
1869dbit_nounlock_20:
1870#endif
1871
1872	rfir
1873	nop
1874#endif
1875
/*
 * Fault exit stubs: each loads an interruption code into %r8 and
 * branches (with the ldi in the delay slot) to intr_save, which saves
 * full state and dispatches to the C-level handle_interruption().
 */
1876	.import handle_interruption,code

1878kernel_bad_space:
1879	b               intr_save
1880	ldi             31,%r8  /* Use an unused code */
1881
1882dbit_fault:
1883	b               intr_save
1884	ldi             20,%r8
1885
1886itlb_fault:
1887	b               intr_save
1888	ldi             6,%r8
1889
1890nadtlb_fault:
1891	b               intr_save
1892	ldi             17,%r8
1893
1894dtlb_fault:
1895	b               intr_save
1896	ldi             15,%r8
1897
1898	/* Register saving semantics for system calls:
1899
1900	   %r1		   clobbered by system call macro in userspace
1901	   %r2		   saved in PT_REGS by gateway page
1902	   %r3  - %r18	   preserved by C code (saved by signal code)
1903	   %r19 - %r20	   saved in PT_REGS by gateway page
1904	   %r21 - %r22	   non-standard syscall args
1905			   stored in kernel stack by gateway page
1906	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
1907	   %r27 - %r30	   saved in PT_REGS by gateway page
1908	   %r31		   syscall return pointer
1909	 */
1910
1911	/* Floating point registers (FIXME: what do we do with these?)
1912
1913	   %fr0  - %fr3	   status/exception, not preserved
1914	   %fr4  - %fr7	   arguments
1915	   %fr8	 - %fr11   not preserved by C code
1916	   %fr12 - %fr21   preserved by C code
1917	   %fr22 - %fr31   not preserved by C code
1918	 */
1919
	/* reg_save: store callee-saved registers %r3-%r18 into the
	 * pt_regs area addressed by \regs, using the word-size STREG
	 * from asm/assembly.h.  Used by syscall wrappers that may
	 * deliver signals or switch to a child. */
1920	.macro	reg_save regs
1921	STREG	%r3, PT_GR3(\regs)
1922	STREG	%r4, PT_GR4(\regs)
1923	STREG	%r5, PT_GR5(\regs)
1924	STREG	%r6, PT_GR6(\regs)
1925	STREG	%r7, PT_GR7(\regs)
1926	STREG	%r8, PT_GR8(\regs)
1927	STREG	%r9, PT_GR9(\regs)
1928	STREG   %r10,PT_GR10(\regs)
1929	STREG   %r11,PT_GR11(\regs)
1930	STREG   %r12,PT_GR12(\regs)
1931	STREG   %r13,PT_GR13(\regs)
1932	STREG   %r14,PT_GR14(\regs)
1933	STREG   %r15,PT_GR15(\regs)
1934	STREG   %r16,PT_GR16(\regs)
1935	STREG   %r17,PT_GR17(\regs)
1936	STREG   %r18,PT_GR18(\regs)
1937	.endm
1938
	/* reg_restore: inverse of reg_save -- reload %r3-%r18 from the
	 * pt_regs area addressed by \regs. */
1939	.macro	reg_restore regs
1940	LDREG	PT_GR3(\regs), %r3
1941	LDREG	PT_GR4(\regs), %r4
1942	LDREG	PT_GR5(\regs), %r5
1943	LDREG	PT_GR6(\regs), %r6
1944	LDREG	PT_GR7(\regs), %r7
1945	LDREG	PT_GR8(\regs), %r8
1946	LDREG	PT_GR9(\regs), %r9
1947	LDREG   PT_GR10(\regs),%r10
1948	LDREG   PT_GR11(\regs),%r11
1949	LDREG   PT_GR12(\regs),%r12
1950	LDREG   PT_GR13(\regs),%r13
1951	LDREG   PT_GR14(\regs),%r14
1952	LDREG   PT_GR15(\regs),%r15
1953	LDREG   PT_GR16(\regs),%r16
1954	LDREG   PT_GR17(\regs),%r17
1955	LDREG   PT_GR18(\regs),%r18
1956	.endm
1957
/*
 * sys_fork_wrapper: implements fork() by calling sys_clone(SIGCHLD,
 * child_sp, regs).  Saves %r3-%r18 and %cr27 into pt_regs so the child
 * can be resumed, stashes %r2/%r30 in caller-clobbered pt_regs slots for
 * child_return, and restores everything at wrapper_exit.  The child
 * enters at child_return with a 0 return value.
 */
1958	.export sys_fork_wrapper
1959	.export child_return
1960sys_fork_wrapper:
1961	ldo	TASK_REGS-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r1	/* get pt regs */
1962	reg_save %r1
1963	mfctl	%cr27, %r3
1964	STREG	%r3, PT_CR27(%r1)	/* preserve thread register */
1965
1966	STREG	%r2,-RP_OFFSET(%r30)	/* save return pointer */
1967	ldo	FRAME_SIZE(%r30),%r30	/* push a stack frame for the C call */
1968#ifdef __LP64__
1969	ldo	-16(%r30),%r29		/* Reference param save area */
1970#endif
1971
1972	/* These are call-clobbered registers and therefore
1973	   also syscall-clobbered (we hope). */
1974	STREG	%r2,PT_GR19(%r1)	/* save for child */
1975	STREG	%r30,PT_GR21(%r1)
1976
1977	LDREG	PT_GR30(%r1),%r25	/* arg1 = user stack pointer */
1978	copy	%r1,%r24		/* arg2 = pt_regs */
1979	bl	sys_clone,%r2
1980	ldi	SIGCHLD,%r26		/* arg0 = clone flags (delay slot) */
1981
1982	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
/*
 * wrapper_exit: common return path for fork/clone/vfork wrappers and for
 * the child via child_return.  Pops the frame, restores %cr27 and
 * %r3-%r18, and returns with the syscall number in %r20 for strace.
 */
1983wrapper_exit:
1984	ldo	-FRAME_SIZE(%r30),%r30		/* get the stackframe */
1985	ldo	TASK_REGS-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r1    /* get pt regs */
1986
1987	LDREG	PT_CR27(%r1), %r3
1988	mtctl	%r3, %cr27
1989	reg_restore %r1
1990
1991	/* strace expects syscall # to be preserved in r20 */
1992	ldi	__NR_fork,%r20
1993	bv %r0(%r2)
1994	STREG	%r20,PT_GR20(%r1)
1995
1996	/* Set the return value for the child */
1997child_return:
1998	bl	schedule_tail, %r2
1999	nop
2000
	/* reload parent's saved %r2 (stored via PT_GR19 above) */
2001	LDREG	TASK_PT_GR19-TASK_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30),%r2
2002	b	wrapper_exit
2003	copy	%r0,%r28	/* child's fork() return value is 0 */
2004
2005
/*
 * sys_clone_wrapper: saves %r3-%r18 and %cr27 into pt_regs, pushes a
 * frame, and calls sys_clone with the pt_regs pointer as an extra
 * argument (%r24); exits through wrapper_exit (shared with fork).
 */
2006	.export sys_clone_wrapper
2007sys_clone_wrapper:
2008	ldo	TASK_REGS-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r1	/* get pt regs */
2009	reg_save %r1
2010	mfctl	%cr27, %r3
2011	STREG	%r3, PT_CR27(%r1)	/* preserve thread register */
2012
2013	STREG	%r2,-RP_OFFSET(%r30)	/* save return pointer */
2014	ldo	FRAME_SIZE(%r30),%r30
2015#ifdef __LP64__
2016	ldo	-16(%r30),%r29		/* Reference param save area */
2017#endif
2018
2019	STREG	%r2,PT_GR19(%r1)	/* save for child */
2020	STREG	%r30,PT_GR21(%r1)
2021	bl	sys_clone,%r2
2022	copy	%r1,%r24		/* pass pt_regs (delay slot) */
2023
2024	b	wrapper_exit
2025	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
2026
/*
 * sys_vfork_wrapper: saves %r3-%r18 and %cr27 into pt_regs, pushes a
 * frame, and calls sys_vfork(regs); exits through wrapper_exit (shared
 * with fork).
 */
2027	.export sys_vfork_wrapper
2028sys_vfork_wrapper:
2029	ldo	TASK_REGS-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r1	/* get pt regs */
2030	reg_save %r1
2031	mfctl	%cr27, %r3
2032	STREG	%r3, PT_CR27(%r1)	/* preserve thread register */
2033
2034	STREG	%r2,-RP_OFFSET(%r30)	/* save return pointer */
2035	ldo	FRAME_SIZE(%r30),%r30
2036#ifdef __LP64__
2037	ldo	-16(%r30),%r29		/* Reference param save area */
2038#endif
2039
2040	STREG	%r2,PT_GR19(%r1)	/* save for child */
2041	STREG	%r30,PT_GR21(%r1)
2042
2043	bl	sys_vfork,%r2
2044	copy	%r1,%r26		/* arg0 = pt_regs (delay slot) */
2045
2046	b	wrapper_exit
2047	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
2048
2049
	/* execve_wrapper: shared body for sys_execve / sys32_execve
	 * wrappers.  Calls \execve(regs); on success (%r28 not in the
	 * -errno range) returns via the restored %r2, i.e. back into the
	 * syscall exit path with the new process image's registers. */
2050	.macro  execve_wrapper execve
2051	ldo	TASK_REGS-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r1	    /* get pt regs */
2052
2053	/*
2054	 * Do we need to save/restore r3-r18 here?
2055	 * I don't think so. why would new thread need old
2056	 * threads registers?
2057	 */
2058
2059	/* %arg0 - %arg3 are already saved for us. */
2060
2061	STREG %r2,-RP_OFFSET(%r30)	/* save return pointer */
2062	ldo FRAME_SIZE(%r30),%r30
2063#ifdef __LP64__
2064	ldo	-16(%r30),%r29		/* Reference param save area */
2065#endif
2066	bl \execve,%r2
2067	copy %r1,%arg0			/* arg0 = pt_regs (delay slot) */
2068
2069	ldo -FRAME_SIZE(%r30),%r30
2070	LDREG -RP_OFFSET(%r30),%r2
2071
2072	/* If exec succeeded we need to load the args */
2073
	/* -1024 <= %r28 (unsigned >=) means an -errno return: keep %r19
	 * as the error return target set in the delay slot below. */
2074	ldo -1024(%r0),%r1
2075	cmpb,>>= %r28,%r1,error_\execve
2076	copy %r2,%r19
2077
2078error_\execve:
2079	bv %r0(%r19)
2080	nop
2081	.endm
2082
/* Native execve entry point: expands the common wrapper around sys_execve. */
2083	.export sys_execve_wrapper
2084	.import sys_execve

2086sys_execve_wrapper:
2087	execve_wrapper sys_execve
2088
2089#ifdef __LP64__
/* 32-bit-compat execve entry point (64-bit kernels only). */
2090	.export sys32_execve_wrapper
2091	.import sys32_execve

2093sys32_execve_wrapper:
2094	execve_wrapper sys32_execve
2095#endif
2096
/*
 * sys_rt_sigreturn_wrapper: calls sys_rt_sigreturn(regs), which rewrites
 * pt_regs from the user sigcontext, then restores %r3-%r18 from the
 * (now-updated) pt_regs and returns through the %r2 that sigreturn left
 * there.
 */
2097	.export sys_rt_sigreturn_wrapper
2098sys_rt_sigreturn_wrapper:
2099	ldo	TASK_REGS-TASK_SZ_ALGN-FRAME_SIZE(%r30), %r26	/* arg0 = pt_regs */
2100	/* Don't save regs, we are going to restore them from sigcontext. */
2101	STREG	%r2, -RP_OFFSET(%r30)
2102#ifdef __LP64__
2103	ldo	FRAME_SIZE(%r30), %r30
2104	bl	sys_rt_sigreturn,%r2
2105	ldo	-16(%r30),%r29		/* Reference param save area */
2106#else
2107	bl	sys_rt_sigreturn,%r2
2108	ldo	FRAME_SIZE(%r30), %r30	/* push frame in the delay slot */
2109#endif
2110
2111	ldo	-FRAME_SIZE(%r30), %r30
2112	LDREG	-RP_OFFSET(%r30), %r2
2113
2114	/* FIXME: I think we need to restore a few more things here. */
2115	ldo	TASK_REGS-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r1	    /* get pt regs */
2116	reg_restore %r1
2117
2118	/* If the signal was received while the process was blocked on a
2119	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
2120	 * take us to syscall_exit_rfi and on to intr_return.
2121	 */
2122	bv	%r0(%r2)
2123	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
2124
/*
 * sys_sigaltstack_wrapper: passes the saved user stack pointer as an
 * extra argument (%r24) to do_sigaltstack; otherwise a plain frame
 * push/call/pop.
 */
2125	.export sys_sigaltstack_wrapper
2126sys_sigaltstack_wrapper:
2127	/* Get the user stack pointer */
2128	LDREG	-TASK_SZ_ALGN-FRAME_SIZE+TASK_PT_GR30(%r30), %r24
2129	STREG	%r2, -RP_OFFSET(%r30)	/* save return pointer */
2130#ifdef __LP64__
2131	ldo	FRAME_SIZE(%r30), %r30
2132	bl	do_sigaltstack,%r2
2133	ldo	-16(%r30),%r29		/* Reference param save area */
2134#else
2135	bl	do_sigaltstack,%r2
2136	ldo	FRAME_SIZE(%r30), %r30	/* push frame in the delay slot */
2137#endif
2138
2139	ldo	-FRAME_SIZE(%r30), %r30
2140	LDREG	-RP_OFFSET(%r30), %r2
2141	bv	%r0(%r2)
2142	nop
2143
2144#ifdef __LP64__
/*
 * sys32_sigaltstack_wrapper: 32-bit-compat variant; calls
 * do_sigaltstack32 with the saved user stack pointer in %r24.
 */
2145	.export sys32_sigaltstack_wrapper
2146sys32_sigaltstack_wrapper:
2147	/* Get the user stack pointer */
2148	LDREG	-TASK_SZ_ALGN-FRAME_SIZE+TASK_PT_GR30(%r30), %r24
2149	STREG	%r2, -RP_OFFSET(%r30)	/* save return pointer */
2150	ldo	FRAME_SIZE(%r30), %r30
2151	bl	do_sigaltstack32,%r2
2152	ldo	-16(%r30),%r29		/* Reference param save area */
2153
2154	ldo	-FRAME_SIZE(%r30), %r30
2155	LDREG	-RP_OFFSET(%r30), %r2
2156	bv	%r0(%r2)
2157	nop
2158#endif
2159
/*
 * sys_rt_sigsuspend_wrapper: saves %r3-%r18 into pt_regs (a signal may
 * be delivered while suspended), passes the pt_regs pointer as an extra
 * argument (%r24) to sys_rt_sigsuspend, then restores the registers on
 * return.
 */
2160	.export sys_rt_sigsuspend_wrapper
2161sys_rt_sigsuspend_wrapper:
2162	ldo	TASK_REGS-TASK_SZ_ALGN-FRAME_SIZE(%r30), %r24
2163	reg_save %r24
2164
2165	STREG	%r2, -RP_OFFSET(%r30)	/* save return pointer */
2166#ifdef __LP64__
2167	ldo	FRAME_SIZE(%r30), %r30
2168	bl	sys_rt_sigsuspend,%r2
2169	ldo	-16(%r30),%r29		/* Reference param save area */
2170#else
2171	bl	sys_rt_sigsuspend,%r2
2172	ldo	FRAME_SIZE(%r30), %r30	/* push frame in the delay slot */
2173#endif
2174
2175	ldo	-FRAME_SIZE(%r30), %r30
2176	LDREG	-RP_OFFSET(%r30), %r2
2177
2178	ldo	TASK_REGS-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r1
2179	reg_restore %r1
2180
2181	bv	%r0(%r2)
2182	nop
2183
	/* Common syscall exit path: stash the syscall return value into
	 * the task's pt_regs, then check for pending softirqs, a needed
	 * reschedule, and pending signals before restoring user-mode
	 * register state and branching back to user space.  Tasks that
	 * are being ptraced leave through syscall_restore_rfi instead.
	 * Throughout, the task struct / pt_regs live at a fixed offset
	 * below the kernel stack pointer (%r30).
	 */
2184	.export syscall_exit
2185syscall_exit:
2186	/* NOTE: HP-UX syscalls also come through here
2187	   after hpux_syscall_exit fixes up return
2188	   values. */
2189	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
2190	 * via syscall_exit_rfi if the signal was received while the process
2191	 * was running.
2192	 */
2193
2194	/* save return value now */
2195
2196	STREG     %r28,TASK_PT_GR28-TASK_SZ_ALGN-FRAME_SIZE(%r30)
2197
2198	/* Save other hpux returns if personality is PER_HPUX */
2199
2200#define PER_HPUX 0xe /* <linux/personality.h> cannot be easily included */
2201
2202	LDREG     TASK_PERSONALITY-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r19
	/* Branch (to 1f) when the personality is NOT PER_HPUX; the ,n
	 * completer nullifies the delay-slot STREG on the taken branch,
	 * so both extra stores happen only for HP-UX tasks.
	 */
2203	CMPIB<>,n PER_HPUX,%r19,1f
2204	STREG     %r22,TASK_PT_GR22-TASK_SZ_ALGN-FRAME_SIZE(%r30)
2205	STREG     %r29,TASK_PT_GR29-TASK_SZ_ALGN-FRAME_SIZE(%r30)
22061:
2207
2208	/* Seems to me that dp could be wrong here, if the syscall involved
2209	 * calling a module, and nothing got round to restoring dp on return.
2210	 */
2211	loadgp
2212
syscall_check_bh:
2214
2215	/* Check for software interrupts */
2216
2217	.import irq_stat,data
2218
2219	ldil    L%irq_stat,%r19
2220	ldo     R%irq_stat(%r19),%r19
2221
2222#ifdef CONFIG_SMP
2223	/* sched.h: int processor */
2224	/* %r26 is used as scratch register to index into irq_stat[] */
2225	ldw     TASK_PROCESSOR-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
2226
2227	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
2228#ifdef __LP64__
2229	shld	%r26, 6, %r20
2230#else
2231	shlw	%r26, 5, %r20
2232#endif
2233	add     %r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
2234#endif /* CONFIG_SMP */
2235
2236	LDREG   IRQSTAT_SIRQ_PEND(%r19),%r20    /* hardirq.h: unsigned long */
2237	cmpib,<>,n 0,%r20,syscall_do_softirq /* forward */
2238
syscall_check_resched:
2240
2241	/* check for reschedule */
2242
2243	LDREG  TASK_NEED_RESCHED-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
2244	CMPIB<>,n 0,%r19,syscall_do_resched /* forward */
2245
syscall_check_sig:
	/* %r1 = task pointer; note syscall_restore below relies on %r1
	 * still holding this value when entered by fall-through.
	 */
2247	ldo     -TASK_SZ_ALGN-FRAME_SIZE(%r30),%r1         /* get task ptr */
2248	/* check for pending signals */
2249	ldw     TASK_SIGPENDING(%r1),%r19
2250	cmpib,<>,n 0,%r19,syscall_do_signal  /* forward */
2251
syscall_restore:
	/* bb tests bit 31 (the least-significant bit of a 32-bit value
	 * in PA bit numbering) of task->ptrace; ptraced tasks must exit
	 * via an rfi so PSW trace bits can be set.
	 */
2253	LDREG	TASK_PTRACE(%r1), %r19		/* Are we being ptraced? */
2254	bb,<,n	%r19,31,syscall_restore_rfi
2255
2256	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
2257	rest_fp	%r19
2258
2259	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
2260	mtsar	%r19
2261
2262	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
2263	LDREG	TASK_PT_GR19(%r1),%r19
2264	LDREG   TASK_PT_GR20(%r1),%r20
2265	LDREG	TASK_PT_GR21(%r1),%r21
2266	LDREG	TASK_PT_GR22(%r1),%r22
2267	LDREG	TASK_PT_GR23(%r1),%r23
2268	LDREG	TASK_PT_GR24(%r1),%r24
2269	LDREG	TASK_PT_GR25(%r1),%r25
2270	LDREG	TASK_PT_GR26(%r1),%r26
2271	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
2272	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
2273	LDREG	TASK_PT_GR29(%r1),%r29
2274	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */
2275
	/* Interrupts are disabled (rsm PSW_SM_I) across the switch of
	 * %r30 to the user stack and %sr7 to the user space id, since an
	 * interruption taken in that window would run with an
	 * inconsistent stack/space — NOTE(review): presumed rationale,
	 * confirm against the interruption handlers' sr7 usage.
	 */
2276	rsm     PSW_SM_I, %r0
2277	LDREG   TASK_PT_GR30(%r1),%r30             /* restore user sp */
2278	mfsp	%sr3,%r1			   /* Get users space id */
2279	mtsp    %r1,%sr7                           /* Restore sr7 */
2280	ssm     PSW_SM_I, %r0
2281	mtsp	%r1,%sr4			   /* Restore sr4 */
2282	mtsp	%r1,%sr5			   /* Restore sr5 */
2283	mtsp	%r1,%sr6			   /* Restore sr6 */
2284
	/* Force the low two bits of the return address to 3, i.e.
	 * privilege level 3 (user), so the be cannot re-enter at PL0. */
2285	depi	3,31,2,%r31			   /* ensure return to user mode. */
2286
2287#ifdef __LP64__
2288	/* Since we are returning to a 32 bit user process, we always
2289	 * clear the W bit. This means that the be (and mtsp) gets
2290	 * executed in narrow mode, but that is OK, since we are
2291	 * returning to a 32 bit process. When we support 64 bit processes
2292	 * we won't clear the W bit, so the be will run in wide mode.
2293	 */
2294
2295	be	0(%sr3,%r31)			   /* return to user space */
2296	rsm	PSW_SM_W, %r0
2297#else
2298	be,n    0(%sr3,%r31)                       /* return to user space */
2299#endif
2300
2301	/* We have to return via an RFI, so that PSW T and R bits can be set
2302	 * appropriately.
2303	 * This sets up pt_regs so we can return via intr_restore, which is not
2304	 * the most efficient way of doing things, but it works.
2305	 *
2306	 * On entry: %r1 = task pointer, %r19 = task->ptrace flags.
2307	 */
syscall_restore_rfi:
	/* A recovery counter of -1 plus the PSW R bit makes the CPU trap
	 * after the very next user instruction — the mechanism behind
	 * PTRACE single-stepping.
	 */
2307	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
2308	mtctl	%r2,%cr0			   /*   for immediate trap */
2309	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
2310	ldi	0x0b,%r20			   /* Create new PSW */
2311	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */
	/* bb,>= branches when the tested bit is 0: set R (recovery
	 * counter) only for single-step, T (taken-branch trap) only for
	 * block-step.
	 */
2312	bb,>=,n	%r19,15,try_tbit		   /* PT_SINGLESTEP */
2313	depi	-1,27,1,%r20			   /* R bit */
try_tbit:
2315	bb,>=,n	%r19,14,psw_setup		   /* PT_BLOCKSTEP, see ptrace.c */
2316	depi	-1,7,1,%r20			   /* T bit */
psw_setup:
2318	STREG	%r20,TASK_PT_PSW(%r1)
2319
2320	/* Always store space registers, since sr3 can be changed (e.g. fork) */
2321
2322	mfsp    %sr3,%r25
2323	STREG   %r25,TASK_PT_SR3(%r1)
2324	STREG   %r25,TASK_PT_SR4(%r1)
2325	STREG   %r25,TASK_PT_SR5(%r1)
2326	STREG   %r25,TASK_PT_SR6(%r1)
2327	STREG   %r25,TASK_PT_SR7(%r1)
2328	STREG   %r25,TASK_PT_IASQ0(%r1)
2329	STREG   %r25,TASK_PT_IASQ1(%r1)
2330
2331	/* XXX W bit??? */
2332	/* Now if old D bit is clear, it means we didn't save all registers
2333	 * on syscall entry, so do that now.  This only happens on TRACEME
2334	 * calls, or if someone attached to us while we were on a syscall.
2335	 * We could make this more efficient by not saving r3-r18, but
2336	 * then we wouldn't be able to use the common intr_restore path.
2337	 * It is only for traced processes anyway, so performance is not
2338	 * an issue.
2339	 */
	/* No ,n completer: the delay-slot ldo executes on BOTH paths,
	 * so %r25 = &task->pt_regs whether or not we branch. */
2340	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
2341	ldo	TASK_REGS(%r1),%r25
2342	reg_save %r25				   /* Save r3 to r18 */
2343	mfsp	%sr0,%r2
2344	STREG	%r2,TASK_PT_SR0(%r1)
2345	mfsp	%sr1,%r2
2346	STREG	%r2,TASK_PT_SR1(%r1)
2347	mfsp	%sr2,%r2
2348	STREG	%r2,TASK_PT_SR2(%r1)
pt_regs_ok:
2350	LDREG	TASK_PT_GR31(%r1),%r2
2351	depi	3,31,2,%r2			   /* ensure return to user mode. */
2352	STREG	%r2,TASK_PT_IAOQ0(%r1)
2353	ldo	4(%r2),%r2
2354	STREG	%r2,TASK_PT_IAOQ1(%r1)
2355
	/* Delay slot: hand the pt_regs pointer to intr_restore in %r16. */
2356	b	intr_restore
2357	copy	%r25,%r16
2358
2359	.import do_softirq,code
	/* Run pending softirqs, then re-run the reschedule check.  The
	 * delay-slot ssm turns the PSW I (interrupt enable) bit back on,
	 * since do_softirq returns with it off (see comment below).
	 */
syscall_do_softirq:
2361	bl      do_softirq,%r2
2362	nop
2363	b       syscall_check_resched
2364	ssm     PSW_SM_I, %r0  /* do_softirq returns with I bit off */
2365
2366	.import schedule,code
	/* Call the scheduler, then restart the exit checks from the top
	 * (syscall_check_bh), since softirqs or signals may have become
	 * pending while we were switched out.  The #ifdef selects the
	 * bl delay-slot instruction: on LP64 it sets up %r29, the
	 * reference parameter save area pointer required by the 64-bit
	 * calling convention; on 32-bit it is just a nop.
	 */
syscall_do_resched:
2368	bl	schedule,%r2
2369#ifdef __LP64__
2370	ldo	-16(%r30),%r29		/* Reference param save area */
2371#else
2372	nop
2373#endif
2374	b       syscall_check_bh  /* if resched, we start over again */
2375	nop
2376
2377	.import do_signal,code
	/* Deliver pending signals: save the callee-saves into pt_regs so
	 * the sigcontext is complete, call do_signal, then reload the
	 * possibly-modified register state and retry syscall_restore.
	 * On entry: %r1 = task pointer (set at syscall_check_sig).
	 */
syscall_do_signal:
2379	/* Save callee-save registers (for sigcontext).
2380	   FIXME: After this point the process structure should be
2381	   consistent with all the relevant state of the process
2382	   before the syscall.  We need to verify this. */
2383	ldo	TASK_REGS(%r1), %r25		/* struct pt_regs *regs */
2384	reg_save %r25
2385
2386	ldi	1, %r24				/* unsigned long in_syscall */
2387
2388#ifdef __LP64__
2389	ldo	-16(%r30),%r29			/* Reference param save area */
2390#endif
	/* do_signal(oldset=%r26, regs=%r25, in_syscall=%r24); the NULL
	 * first argument is loaded in the bl delay slot. */
2391	bl	do_signal,%r2
2392	copy	%r0, %r26			/* sigset_t *oldset = NULL */
2393
2394	ldo     -TASK_SZ_ALGN-FRAME_SIZE(%r30), %r1     /* reload task ptr */
2395	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
2396	reg_restore %r20
2397
	/* %r1 holds the task pointer again, as syscall_restore expects. */
2398	b,n     syscall_restore
2399
2400	/*
2401	 * get_register is used by the non access tlb miss handlers to
2402	 * copy the value of the general register specified in r8 into
2403	 * r1. This routine can't be used for shadowed registers, since
2404	 * the rfir will restore the original value. So, for the shadowed
2405	 * registers we put a -1 into r1 to indicate that the register
2406	 * should not be used (the register being copied could also have
2407	 * a -1 in it, but that is OK, it just means that we will have
2408	 * to use the slow path instead).
2409	 *
2410	 * In:  %r8 = register number (0-31), %r25 = return address.
2411	 * Out: %r1 = register value, or -1 for shadowed registers.
2412	 */
get_register:
	/* blr branches to (here + 8) + %r8 * 8.  Every table entry below
	 * is exactly 8 bytes (a bv plus its delay-slot copy/ldi), so %r8
	 * indexes the table directly; the bv returns through %r25.  Do
	 * not reorder or resize these entries.
	 */
2412	blr     %r8,%r0
2413	nop
2414	bv      %r0(%r25)    /* r0 */
2415	copy    %r0,%r1
2416	bv      %r0(%r25)    /* r1 - shadowed */
2417	ldi     -1,%r1
2418	bv      %r0(%r25)    /* r2 */
2419	copy    %r2,%r1
2420	bv      %r0(%r25)    /* r3 */
2421	copy    %r3,%r1
2422	bv      %r0(%r25)    /* r4 */
2423	copy    %r4,%r1
2424	bv      %r0(%r25)    /* r5 */
2425	copy    %r5,%r1
2426	bv      %r0(%r25)    /* r6 */
2427	copy    %r6,%r1
2428	bv      %r0(%r25)    /* r7 */
2429	copy    %r7,%r1
2430	bv      %r0(%r25)    /* r8 - shadowed */
2431	ldi     -1,%r1
2432	bv      %r0(%r25)    /* r9 - shadowed */
2433	ldi     -1,%r1
2434	bv      %r0(%r25)    /* r10 */
2435	copy    %r10,%r1
2436	bv      %r0(%r25)    /* r11 */
2437	copy    %r11,%r1
2438	bv      %r0(%r25)    /* r12 */
2439	copy    %r12,%r1
2440	bv      %r0(%r25)    /* r13 */
2441	copy    %r13,%r1
2442	bv      %r0(%r25)    /* r14 */
2443	copy    %r14,%r1
2444	bv      %r0(%r25)    /* r15 */
2445	copy    %r15,%r1
2446	bv      %r0(%r25)    /* r16 - shadowed */
2447	ldi     -1,%r1
2448	bv      %r0(%r25)    /* r17 - shadowed */
2449	ldi     -1,%r1
2450	bv      %r0(%r25)    /* r18 */
2451	copy    %r18,%r1
2452	bv      %r0(%r25)    /* r19 */
2453	copy    %r19,%r1
2454	bv      %r0(%r25)    /* r20 */
2455	copy    %r20,%r1
2456	bv      %r0(%r25)    /* r21 */
2457	copy    %r21,%r1
2458	bv      %r0(%r25)    /* r22 */
2459	copy    %r22,%r1
2460	bv      %r0(%r25)    /* r23 */
2461	copy    %r23,%r1
2462	bv      %r0(%r25)    /* r24 - shadowed */
2463	ldi     -1,%r1
2464	bv      %r0(%r25)    /* r25 - shadowed */
2465	ldi     -1,%r1
2466	bv      %r0(%r25)    /* r26 */
2467	copy    %r26,%r1
2468	bv      %r0(%r25)    /* r27 */
2469	copy    %r27,%r1
2470	bv      %r0(%r25)    /* r28 */
2471	copy    %r28,%r1
2472	bv      %r0(%r25)    /* r29 */
2473	copy    %r29,%r1
2474	bv      %r0(%r25)    /* r30 */
2475	copy    %r30,%r1
2476	bv      %r0(%r25)    /* r31 */
2477	copy    %r31,%r1
2478
2479	/*
2480	 * set_register is used by the non access tlb miss handlers to
2481	 * copy the value of r1 into the general register specified in
2482	 * r8.
2483	 *
2484	 * In: %r8 = destination register number (0-31), %r1 = value,
2485	 *     %r25 = return address.
2486	 */
set_register:
	/* Same dispatch as get_register: blr branches to (here + 8) +
	 * %r8 * 8, and each 8-byte entry is a bv with the store to the
	 * target register in its delay slot.  Do not reorder or resize
	 * these entries.
	 */
2486	blr     %r8,%r0
2487	nop
2488	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
2489	copy    %r1,%r0
2490	bv      %r0(%r25)    /* r1 */
2491	copy    %r1,%r1
2492	bv      %r0(%r25)    /* r2 */
2493	copy    %r1,%r2
2494	bv      %r0(%r25)    /* r3 */
2495	copy    %r1,%r3
2496	bv      %r0(%r25)    /* r4 */
2497	copy    %r1,%r4
2498	bv      %r0(%r25)    /* r5 */
2499	copy    %r1,%r5
2500	bv      %r0(%r25)    /* r6 */
2501	copy    %r1,%r6
2502	bv      %r0(%r25)    /* r7 */
2503	copy    %r1,%r7
2504	bv      %r0(%r25)    /* r8 */
2505	copy    %r1,%r8
2506	bv      %r0(%r25)    /* r9 */
2507	copy    %r1,%r9
2508	bv      %r0(%r25)    /* r10 */
2509	copy    %r1,%r10
2510	bv      %r0(%r25)    /* r11 */
2511	copy    %r1,%r11
2512	bv      %r0(%r25)    /* r12 */
2513	copy    %r1,%r12
2514	bv      %r0(%r25)    /* r13 */
2515	copy    %r1,%r13
2516	bv      %r0(%r25)    /* r14 */
2517	copy    %r1,%r14
2518	bv      %r0(%r25)    /* r15 */
2519	copy    %r1,%r15
2520	bv      %r0(%r25)    /* r16 */
2521	copy    %r1,%r16
2522	bv      %r0(%r25)    /* r17 */
2523	copy    %r1,%r17
2524	bv      %r0(%r25)    /* r18 */
2525	copy    %r1,%r18
2526	bv      %r0(%r25)    /* r19 */
2527	copy    %r1,%r19
2528	bv      %r0(%r25)    /* r20 */
2529	copy    %r1,%r20
2530	bv      %r0(%r25)    /* r21 */
2531	copy    %r1,%r21
2532	bv      %r0(%r25)    /* r22 */
2533	copy    %r1,%r22
2534	bv      %r0(%r25)    /* r23 */
2535	copy    %r1,%r23
2536	bv      %r0(%r25)    /* r24 */
2537	copy    %r1,%r24
2538	bv      %r0(%r25)    /* r25 */
2539	copy    %r1,%r25
2540	bv      %r0(%r25)    /* r26 */
2541	copy    %r1,%r26
2542	bv      %r0(%r25)    /* r27 */
2543	copy    %r1,%r27
2544	bv      %r0(%r25)    /* r28 */
2545	copy    %r1,%r28
2546	bv      %r0(%r25)    /* r29 */
2547	copy    %r1,%r29
2548	bv      %r0(%r25)    /* r30 */
2549	copy    %r1,%r30
2550	bv      %r0(%r25)    /* r31 */
2551	copy    %r1,%r31
2552