1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * arch/sh64/kernel/entry.S
7 *
8 * Copyright (C) 2000, 2001  Paolo Alberelli
9 *
10 */
11
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/sys.h>
15
16#include <asm/processor.h>
17#include <asm/registers.h>
18#include <asm/unistd.h>
19
/*
 * A few defines that ought to come from sched.h referring
 * to the task structure. Byte offsets within the task
 * structure and related flags.
 *
 * NOTE(review): these offsets are hand-maintained mirrors of the task
 * structure layout and must be kept in sync with sched.h by hand --
 * verify against the struct definition whenever it changes.
 */
#define flags		4	/* task flags word */
#define sigpending	8	/* pending-signal flag; tested in check_signals below */
#define need_resched	20	/* reschedule flag; tested in ret_with_reschedule below */
#define ptrace		24	/* ptrace flags word */

#define PT_TRACESYS	0x00000002	/* bit within the ptrace word (syscall tracing) */
32
/*
 * SR (status register) fields.
 */
#define SR_ASID_MASK	0x00ff0000	/* current address-space ID; preserved across restore_all */
#define SR_FD_MASK	0x00008000	/* FPU disable */
#define SR_SS		0x08000000	/* single-step enable */
#define SR_BL		0x10000000	/* exception block (nesting inhibit) */
#define SR_MD		0x40000000	/* privileged (kernel) mode */
#define SR_MMU		0x80000000	/* MMU enable */
42
/*
 * Event code.
 * Passed in r4 to handle_exception to classify the event
 * (see the handle_exception header comment below).
 */
#define	EVENT_INTERRUPT		0
#define	EVENT_FAULT_TLB		1
#define	EVENT_FAULT_NOT_TLB	2
#define	EVENT_DEBUG		3

/* EXPEVT values */
#define	RESET_CAUSE		0x20	/* power-on/reset; checked in reset_or_panic */
#define DEBUGSS_CAUSE		0x980	/* single-step debug event; checked in reset_or_panic */
54
/*
 * Frame layout. Quad index.
 *
 * The BASIC frame is: 3 special quads (SYSCALL_ID/SSR/SPC), 63 general
 * registers (r0-r62), 8 target registers (t0-t7), 2 pad quads;
 * 76 quads = 608 bytes = 19*32, so FRAME_SIZE is 32-byte aligned.
 */
#define	FRAME_T(x)	FRAME_TBASE+(x*8)	/* target register slot x */
#define	FRAME_R(x)	FRAME_RBASE+(x*8)	/* general register slot x */
#define	FRAME_S(x)	FRAME_SBASE+(x*8)	/* special register slot x */
#define FSPC		0	/* saved SPC index */
#define FSSR		1	/* saved SSR index */
#define FSYSCALL_ID	2	/* syscall number; reset to -1 on frame creation */

/* Arrange the save frame to be a multiple of 32 bytes long */
#define FRAME_SBASE	0
#define FRAME_RBASE	(FRAME_SBASE+(3*8))	/* SYSCALL_ID - SSR - SPC */
#define FRAME_TBASE	(FRAME_RBASE+(63*8))	/* r0 - r62 */
#define FRAME_PBASE	(FRAME_TBASE+(8*8))	/* t0 - t7 */
#define	FRAME_SIZE	(FRAME_PBASE+(2*8))	/* pad0-pad1 */

#define FP_FRAME_SIZE	FP_FRAME_BASE+(33*8)	/* dr0 - dr31 + fpscr */
#define FP_FRAME_BASE	0

/* Byte offsets into reg_save_area used by the first-level handlers. */
#define	SAVED_R2	0*8
#define	SAVED_R3	1*8
#define	SAVED_R4	2*8
#define	SAVED_R5	3*8
#define	SAVED_R18	4*8
#define	SAVED_R6	5*8
#define	SAVED_T0	6*8

/* These are the registers saved in the TLB path that aren't saved in the first
   level of the normal one. */
#define	TLB_SAVED_R25	7*8
#define	TLB_SAVED_T1	8*8
#define	TLB_SAVED_T2	9*8
#define	TLB_SAVED_T3	10*8
#define	TLB_SAVED_T4	11*8
/* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1 causing
   breakage otherwise. */
#define	TLB_SAVED_R0	12*8
#define	TLB_SAVED_R1	13*8
94
/*
 * STI: enable interrupts by clearing the four SR.IMASK bits
 * (SR[7:4], hence the andi with ~0xf0).  Clobbers r6.
 */
#define STI()				\
	getcon	SR, r6;			\
	andi	r6, ~0xf0, r6;		\
	putcon	r6, SR;
99
	.section	.data, "aw"

/* Scratch stack for the fast tlbmiss path: 4 cache lines = 16 quadwords. */
#define FAST_TLBMISS_STACK_CACHELINES 4
#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)

/* Register back-up area for all exceptions */
	.balign	32
	/* Allow for 16 quadwords to be pushed by fast tlbmiss handling
	 * register saves etc. */
	.fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
/* This is 32 byte aligned by construction */
/* Register back-up area for all exceptions */
/* 14 quads: SAVED_R2..SAVED_T0 plus the TLB_SAVED_* slots defined above. */
reg_save_area:
	.quad	0
	.quad	0
	.quad	0
	.quad	0

	.quad	0
	.quad	0
	.quad	0
	.quad	0

	.quad	0
	.quad	0
	.quad	0
	.quad	0

	.quad	0
	.quad   0
130
/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
 * reentrancy. Note this area may be accessed via physical address.
 * Align so this fits a whole single cache line, for ease of purging.
 *
 * 5 quads, used at offsets 0/8/16/24/32 for r0/r1/SPC/SSR/tr0 by
 * reset_or_panic and debug_exception below.
 * NOTE(review): 5 quads = 40 bytes, which spans two 32-byte lines; the
 * trailing .balign pads out the second line.  Confirm the single-line
 * claim above against the cache line size actually in use.
 */
	.balign 32,0,32
resvec_save_area:
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.balign 32,0,32
143
/* Jump table of 3rd level handlers  */
/*
 * One 32-bit entry per 0x20 of EXPEVT/INTEVT.  handle_exception converts
 * the event code to a byte offset with 'shlri r2, 3' (code/0x20 * 4) and
 * fetches the handler with ldx.l, so entries here must remain .long.
 */
trap_jtable:
	.long	do_exception_error		/* 0x000 */
	.long	do_exception_error		/* 0x020 */
	.long	tlb_miss_load				/* 0x040 */
	.long	tlb_miss_store				/* 0x060 */
	.long	do_exception_error		/* 0x080 */
	.long	tlb_miss_load				/* 0x0A0 */
	.long	tlb_miss_store				/* 0x0C0 */
	.long	do_address_error_load	/* 0x0E0 */
	.long	do_address_error_store	/* 0x100 */
#ifndef CONFIG_NOFPU_SUPPORT
	.long	do_fpu_error		/* 0x120 */
#else
	.long	do_exception_error		/* 0x120 */
#endif
	.long	do_exception_error		/* 0x140 */
	.long	system_call				/* 0x160 */
	.long	do_reserved_inst		/* 0x180 */
	.long	do_illegal_slot_inst	/* 0x1A0 */
	.long	do_NMI			/* 0x1C0 */
	.long	do_exception_error		/* 0x1E0 */
	.rept 15
		.long do_IRQ		/* 0x200 - 0x3C0 */
	.endr
	.long	do_exception_error		/* 0x3E0 */
	.rept 32
		.long do_IRQ		/* 0x400 - 0x7E0 */
	.endr
	.long	fpu_error_or_IRQA			/* 0x800 */
	.long	fpu_error_or_IRQB			/* 0x820 */
	.long	do_IRQ			/* 0x840 */
	.long	do_IRQ			/* 0x860 */
	.rept 6
		.long do_exception_error	/* 0x880 - 0x920 */
	.endr
	.long	do_software_break_point	/* 0x940 */
	.long	do_exception_error		/* 0x960 */
	.long	do_single_step		/* 0x980 */

	.rept 3
		.long do_exception_error	/* 0x9A0 - 0x9E0 */
	.endr
	.long	do_IRQ			/* 0xA00 */
	.long	do_IRQ			/* 0xA20 */
	.long	itlb_miss_or_IRQ			/* 0xA40 */
	.long	do_IRQ			/* 0xA60 */
	.long	do_IRQ			/* 0xA80 */
	.long	itlb_miss_or_IRQ			/* 0xAA0 */
	.long	do_exception_error		/* 0xAC0 */
	.long	do_address_error_exec	/* 0xAE0 */
	.rept 8
		.long do_exception_error	/* 0xB00 - 0xBE0 */
	.endr
	.rept 18
		.long do_IRQ		/* 0xC00 - 0xE20 */
	.endr
202
/* System calls jump table */
/*
 * 32-bit handler addresses indexed by syscall number, padded out to
 * NR_syscalls with sys_ni_syscall by the .rept at the end.
 */

.globl  sys_call_table
sys_call_table:
	.long sys_ni_syscall	/* 0  -  old "setup()" system call  */
	.long sys_exit
	.long sys_fork
	.long sys_read
	.long sys_write
	.long sys_open		/* 5 */
	.long sys_close
	.long sys_waitpid
	.long sys_creat
	.long sys_link
	.long sys_unlink		/* 10 */
	.long sys_execve
	.long sys_chdir
	.long sys_time
	.long sys_mknod
	.long sys_chmod		/* 15 */
	.long sys_lchown16
	.long sys_ni_syscall	/* old break syscall holder */
	.long sys_stat
	.long sys_lseek
	.long sys_getpid		/* 20 */
	.long sys_mount
	.long sys_oldumount
	.long sys_setuid16
	.long sys_getuid16
	.long sys_stime		/* 25 */
	.long sys_ptrace
	.long sys_alarm
	.long sys_fstat
	.long sys_pause
	.long sys_utime		/* 30 */
	.long sys_ni_syscall	/* old stty syscall holder */
	.long sys_ni_syscall	/* old gtty syscall holder */
	.long sys_access
	.long sys_nice
	.long sys_ni_syscall	/* 35 */
						/* old ftime syscall holder */
	.long sys_sync
	.long sys_kill
	.long sys_rename
	.long sys_mkdir
	.long sys_rmdir		/* 40 */
	.long sys_dup
	.long sys_pipe
	.long sys_times
	.long sys_ni_syscall	/* old prof syscall holder */
	.long sys_brk		/* 45 */
	.long sys_setgid16
	.long sys_getgid16
	.long sys_signal
	.long sys_geteuid16
	.long sys_getegid16	/* 50 */
	.long sys_acct
	.long sys_umount		/* recycled never used phys() */
	.long sys_ni_syscall	/* old lock syscall holder */
	.long sys_ioctl
	.long sys_fcntl		/* 55 */
	.long sys_ni_syscall	/* old mpx syscall holder */
	.long sys_setpgid
	.long sys_ni_syscall	/* old ulimit syscall holder */
	.long sys_ni_syscall	/* sys_olduname */
	.long sys_umask		/* 60 */
	.long sys_chroot
	.long sys_ustat
	.long sys_dup2
	.long sys_getppid
	.long sys_getpgrp		/* 65 */
	.long sys_setsid
	.long sys_sigaction
	.long sys_sgetmask
	.long sys_ssetmask
	.long sys_setreuid16	/* 70 */
	.long sys_setregid16
	.long sys_sigsuspend
	.long sys_sigpending
	.long sys_sethostname
	.long sys_setrlimit	/* 75 */
	.long sys_old_getrlimit
	.long sys_getrusage
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups16	/* 80 */
	.long sys_setgroups16
	.long sys_ni_syscall	/* sys_oldselect */
	.long sys_symlink
	.long sys_lstat
	.long sys_readlink		/* 85 */
	.long sys_uselib
	.long sys_swapon
	.long sys_reboot
	.long old_readdir
	.long old_mmap		/* 90 */
	.long sys_munmap
	.long sys_truncate
	.long sys_ftruncate
	.long sys_fchmod
	.long sys_fchown16		/* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall	/* old profil syscall holder */
	.long sys_statfs
	.long sys_fstatfs		/* 100 */
	.long sys_ni_syscall	/* ioperm */
	.long sys_socketcall	/* Obsolete implementation of socket syscall */
	.long sys_syslog
	.long sys_setitimer
	.long sys_getitimer	/* 105 */
	.long sys_newstat
	.long sys_newlstat
	.long sys_newfstat
	.long sys_uname
	.long sys_ni_syscall	/* 110 */ /* iopl */
	.long sys_vhangup
	.long sys_ni_syscall	/* idle */
	.long sys_ni_syscall	/* vm86old */
	.long sys_wait4
	.long sys_swapoff		/* 115 */
	.long sys_sysinfo
	.long sys_ipc		/* Obsolete ipc syscall implementation */
	.long sys_fsync
	.long sys_sigreturn
	.long sys_clone		/* 120 */
	.long sys_setdomainname
	.long sys_newuname
	.long sys_ni_syscall	/* sys_modify_ldt */
	.long sys_adjtimex
	.long sys_mprotect		/* 125 */
	.long sys_sigprocmask
	.long sys_create_module
	.long sys_init_module
	.long sys_delete_module
	.long sys_get_kernel_syms	/* 130 */
	.long sys_quotactl
	.long sys_getpgid
	.long sys_fchdir
	.long sys_bdflush
	.long sys_sysfs		/* 135 */
	.long sys_personality
	.long sys_ni_syscall	/* for afs_syscall */
	.long sys_setfsuid16
	.long sys_setfsgid16
	.long sys_llseek		/* 140 */
	.long sys_getdents
	.long sys_select
	.long sys_flock
	.long sys_msync
	.long sys_readv		/* 145 */
	.long sys_writev
	.long sys_getsid
	.long sys_fdatasync
	.long sys_sysctl
	.long sys_mlock		/* 150 */
	.long sys_munlock
	.long sys_mlockall
	.long sys_munlockall
	.long sys_sched_setparam
	.long sys_sched_getparam   	/* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min  /* 160 */
	.long sys_sched_rr_get_interval
	.long sys_nanosleep
	.long sys_mremap
	.long sys_setresuid16
	.long sys_getresuid16	/* 165 */
	.long sys_ni_syscall	/* vm86 */
	.long sys_query_module
	.long sys_poll
	.long sys_nfsservctl
	.long sys_setresgid16	/* 170 */
	.long sys_getresgid16
	.long sys_prctl
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask	/* 175 */
	.long sys_rt_sigpending
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long sys_rt_sigsuspend
	.long sys_pread		/* 180 */
	.long sys_pwrite
	.long sys_chown16
	.long sys_getcwd
	.long sys_capget
	.long sys_capset       	/* 185 */
	.long sys_sigaltstack
	.long sys_sendfile
	.long sys_ni_syscall	/* streams1 */
	.long sys_ni_syscall	/* streams2 */
	.long sys_vfork        	/* 190 */
	.long sys_getrlimit
	.long sys_mmap2
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64		/* 195 */
	.long sys_lstat64
	.long sys_fstat64
	.long sys_lchown
	.long sys_getuid
	.long sys_getgid		/* 200 */
	.long sys_geteuid
	.long sys_getegid
	.long sys_setreuid
	.long sys_setregid
	.long sys_getgroups	/* 205 */
	.long sys_setgroups
	.long sys_fchown
	.long sys_setresuid
	.long sys_getresuid
	.long sys_setresgid	/* 210 */
	.long sys_getresgid
	.long sys_chown
	.long sys_setuid
	.long sys_setgid
	.long sys_setfsuid		/* 215 */
	.long sys_setfsgid
	.long sys_pivot_root
	.long sys_mincore
	.long sys_madvise
	.long sys_socket		/* 220 */
	.long sys_bind
	.long sys_connect
	.long sys_listen
	.long sys_accept
	.long sys_getsockname	/* 225 */
	.long sys_getpeername
	.long sys_socketpair
	.long sys_send
	.long sys_sendto
	.long sys_recv		/* 230*/
	.long sys_recvfrom
	.long sys_shutdown
	.long sys_setsockopt
	.long sys_getsockopt
	.long sys_sendmsg		/* 235 */
	.long sys_recvmsg
	.long sys_semop		/* New ipc syscall implementation */
	.long sys_semget
	.long sys_semctl
	.long sys_msgsnd		/* 240 */
	.long sys_msgrcv
	.long sys_msgget
	.long sys_msgctl
	.long sys_shmatcall
	.long sys_shmdt		/* 245 */
	.long sys_shmget
	.long sys_shmctl

	/*
	 * NOTE!! This doesn't have to be exact - we just have
	 * to make sure we have _enough_ of the "sys_ni_syscall"
	 * entries. Don't panic if you notice that this hasn't
	 * been shrunk every time we add a new system call.
	 */
	/* NOTE(review): 248 entries (0-247) precede this point; the
	 * subtrahend below is deliberately loose per the note above. */
	.rept NR_syscalls-247
		.long sys_ni_syscall
	.endr
466
	.section	.text64, "ax"

/*
 * --- Exception/Interrupt/Event Handling Section
 */

/*
 * VBR and RESVEC blocks.
 *
 * First level handler for VBR-based exceptions.
 *
 * To avoid waste of space, align to the maximum text block size.
 * This is assumed to be at most 128 bytes or 32 instructions.
 * DO NOT EXCEED 32 instructions on the first level handlers !
 *
 * Also note that RESVEC is contained within the VBR block
 * where the room left (1KB - TEXT_SIZE) allows placing
 * the RESVEC block (at most 512B + TEXT_SIZE).
 *
 * So first (and only) level handler for RESVEC-based exceptions.
 *
 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
 * and interrupt) register space is very tight until the state is
 * saved onto the stack frame, which is done in handle_exception().
 *
 */

#define	TEXT_SIZE 	128
#define	BLOCK_SIZE 	1664 		/* Dynamic check, 13*128 */

	.balign TEXT_SIZE
LVBR_block:
	.space	256, 0			/* Power-on class handler, */
					/* not required here       */
not_a_tlb_miss:
	/* First-level handler for general (non-TLB-miss) exceptions. */
	/* Save original stack pointer into KCR1 */
	putcon	SP, KCR1

	/* Save other original registers into reg_save_area */
        _loada  reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	t0, r3
	st.q	SP, SAVED_T0, r3

#ifdef CONFIG_SH64_PAGE_TABLE_AUDIT
	/* This use of DCR is not really legal, but there's nowhere else convenient to store this and it's only for last-resort debug anyway. */
	movi    0x100, r3
	putcon  r3, dcr
#endif

	/* Set args for Non-debug, Not a TLB miss class handler */
	getcon	EXPEVT, r2		/* r2 = event code */
	_loada	ret_from_exception, r3	/* r3 = return address for 3rd level */
	ori	r3, 1, r3		/* force SHmedia mode bit */
	movi	EVENT_FAULT_NOT_TLB, r4	/* r4 = event class */
	or	SP, ZERO, r5		/* r5 = reg_save_area pointer */
	getcon	KCR1, SP		/* restore the original SP */
	_ptar	handle_exception, t0
	blink	t0, ZERO
531
	/*
	 * Instead of the natural .balign 1024 place RESVEC here
	 * respecting the final 1KB alignment.
	 */
	.balign TEXT_SIZE
	/*
	 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
	 * block making sure the final alignment is correct.
	 */
LRESVEC_block:
	/* Panic handler. Called with MMU off. Possible causes/actions:
	 * - Reset:		Jump to program start.
	 * - Single Step:	Turn off Single Step & return.
	 * - Others:		Call panic handler, passing PC as arg.
	 *			(this may need to be extended...)
	 */
reset_or_panic:
	putcon	SP, DCR			/* stash SP; DCR abused as scratch here */
	/* First save r0-1 and tr0, as we need to use these */
	_loada	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
	st.q	SP, 0, r0
	st.q	SP, 8, r1
	gettr	t0, r0
	st.q	SP, 32, r0		/* tr0 saved at offset 32 */

	/* Check cause */
	getcon	EXPEVT, r0
	movi	RESET_CAUSE, r1
	sub	r1, r0, r1		/* r1=0 if reset */
	_loada	_stext-CONFIG_CACHED_MEMORY_OFFSET, r0
	ori	r0, 1, r0		/* SHmedia mode bit */
	ptabs	r0, t0
	beqi	r1, 0, t0		/* Jump to start address if reset */

	getcon	EXPEVT, r0
	movi	DEBUGSS_CAUSE, r1
	sub	r1, r0, r1		/* r1=0 if single step */
	_ptar	single_step_panic, t0
	beqi	r1, 0, t0		/* jump if single step */

	/* If we get here, we have an unknown panic. Just call the panic
	 * handler, passing saved PC. We never expect to return, so we can
	 * use any regs now. */
	getcon	SPC,r2			/* arg1: faulting PC */
	getcon	SSR,r3			/* arg2: saved SR */
	getcon	EXPEVT,r4		/* arg3: event code */
	/* Prepare to jump to C - physical address */
	_loada	panic_handler-CONFIG_CACHED_MEMORY_OFFSET, r1
	ori	r1, 1, r1
	ptabs   r1, t0
	getcon	DCR, SP			/* restore original SP */
	blink	t0, ZERO

single_step_panic:
	/* We are in a handler with Single Step set. We need to resume the
	 * handler, by turning on MMU & turning off Single Step. */
	getcon	SSR, r0
	movi	SR_MMU, r1
	or	r0, r1, r0		/* set MMU enable in the SSR we return with */
	movi	~SR_SS, r1
	and	r0, r1, r0		/* clear single-step */
	putcon	r0, SSR
	/* Restore EXPEVT, as the rte won't do this */
	getcon	PEXPEVT, r0
	putcon	r0, EXPEVT
	/* Restore regs */
	ld.q	SP, 32, r0
	ptabs	r0, tr0			/* restore tr0 (saved at offset 32 above) */
	ld.q	SP, 0, r0
	ld.q	SP, 8, r1
	getcon	DCR, SP			/* restore original SP */
	synco
	rte
605
	.balign	TEXT_SIZE
debug_exception:
	/*
	 * Single step/software_break_point first level handler.
	 * Called with MMU off, so the first thing we do is enable it
	 * by doing an rte with appropriate SSR.
	 */
	putcon	SP, DCR			/* stash SP; DCR abused as scratch here */
	/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
	_loada	resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP

	/* With the MMU off, we are bypassing the cache, so purge any
         * data that will be made stale by the following stores.
         */
	ocbp	SP, 0
	synco

	st.q	SP, 0, r0
	st.q	SP, 8, r1
	getcon	SPC, r0
	st.q	SP, 16, r0
	getcon	SSR, r0
	st.q	SP, 24, r0

	/* Enable MMU, block exceptions, set priv mode, disable single step */
	/* (r0 still holds SSR from the store just above) */
	movi	SR_MMU | SR_BL | SR_MD, r1
	or	r0, r1, r0
	movi	~SR_SS, r1
	and	r0, r1, r0
	putcon	r0, SSR
	/* Force control to debug_exception_2 when rte is executed */
	/* NOTE(review): the label is misspelt "exeception" but used
	 * consistently at both sites, so it assembles correctly. */
	_loada	debug_exeception_2, r0
	ori	r0, 1, r0      /* force SHmedia, just in case */
	putcon	r0, SPC
	getcon	DCR, SP
	synco
	rte
debug_exeception_2:
	/* Now running with MMU on.  Restore saved regs */
	putcon	SP, KCR1
	_loada	resvec_save_area, SP	/* virtual address this time */
	ld.q	SP, 24, r0
	putcon	r0, SSR
	ld.q	SP, 16, r0
	putcon	r0, SPC
	ld.q	SP, 0, r0
	ld.q	SP, 8, r1

	/* Save other original registers into reg_save_area */
        _loada  reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	t0, r3
	st.q	SP, SAVED_T0, r3

	/* Set args for debug class handler */
	getcon	EXPEVT, r2		/* r2 = event code */
	_loada	ret_from_exception, r3	/* r3 = return address for 3rd level */
	ori	r3, 1, r3		/* force SHmedia mode bit */
	movi	EVENT_DEBUG, r4		/* r4 = event class */
	or	SP, ZERO, r5		/* r5 = reg_save_area pointer */
	getcon	KCR1, SP		/* restore the original SP */
	_ptar	handle_exception, t0
	blink	t0, ZERO
674
	.balign	TEXT_SIZE
debug_interrupt:
	/*
	 * Not supported. If we ever get here loop forever.
	 * We may be MMUOFF or MMUON. Just use pic code.
	 */
	_ptar	debug_interrupt, t0
	blink	t0, ZERO		/* tight self-branch: deliberate hang */
	.balign	TEXT_SIZE
684
LRESVEC_block_end:			/* Marker. Unused. */
tlb_miss:
	/* First-level TLB-miss handler.  Tries the fast C path first;
	 * falls back to the generic second-level handler if that fails. */
	putcon	SP, KCR1
	_loada	reg_save_area, SP
	/* SP is guaranteed 32-byte aligned. */
	st.q	SP, TLB_SAVED_R0 , r0
	st.q	SP, TLB_SAVED_R1 , r1
	st.q	SP, SAVED_R2 , r2
	st.q	SP, SAVED_R3 , r3
	st.q	SP, SAVED_R4 , r4
	st.q	SP, SAVED_R5 , r5
	st.q	SP, SAVED_R6 , r6
	st.q	SP, SAVED_R18, r18

	/* Save R25 for safety; as/ld may want to use it to achieve the call to
	 * the code in mm/tlbmiss.c */
	st.q	SP, TLB_SAVED_R25, r25
	gettr	tr0, r2
	gettr	tr1, r3
	gettr	tr2, r4
	gettr	tr3, r5
	gettr	tr4, r18
	st.q	SP, SAVED_T0 , r2
	st.q	SP, TLB_SAVED_T1 , r3
	st.q	SP, TLB_SAVED_T2 , r4
	st.q	SP, TLB_SAVED_T3 , r5
	st.q	SP, TLB_SAVED_T4 , r18

	pt	do_fast_page_fault, tr0
	getcon	SSR, r2
	getcon	EXPEVT, r3		/* arg2: event code */
	getcon	TEA, r4			/* arg3: faulting effective address */
	shlri	r2, 30, r2
	andi	r2, 1, r2	/* r2 = SSR.MD */
	blink 	tr0, r18		/* call do_fast_page_fault(MD, EXPEVT, TEA) */

	pt	fixup_to_invoke_general_handler, tr1

	/* If the fast path handler fixed the fault, just drop through quickly
	   to the restore code right away to return to the excepting context.
	   */
	beqi/u	r2, 0, tr1		/* r2==0 => not fixed, go generic */

fast_tlb_miss_restore:
	/* Fast path succeeded: undo everything tlb_miss saved and rte. */
	ld.q	SP, SAVED_T0, r2
	ld.q	SP, TLB_SAVED_T1, r3
	ld.q	SP, TLB_SAVED_T2, r4

	ld.q	SP, TLB_SAVED_T3, r5
	ld.q	SP, TLB_SAVED_T4, r18

	ptabs	r2, tr0
	ptabs	r3, tr1
	ptabs	r4, tr2
	ptabs	r5, tr3
	ptabs	r18, tr4

	ld.q	SP, TLB_SAVED_R0, r0
	ld.q	SP, TLB_SAVED_R1, r1
	ld.q	SP, SAVED_R2, r2
	ld.q	SP, SAVED_R3, r3
	ld.q	SP, SAVED_R4, r4
	ld.q	SP, SAVED_R5, r5
	ld.q	SP, SAVED_R6, r6
	ld.q	SP, SAVED_R18, r18
	ld.q	SP, TLB_SAVED_R25, r25

	getcon	KCR1, SP
	rte
	nop /* for safety, in case the code is run on sh5-101 cut1.x */
755
fixup_to_invoke_general_handler:

	/* OK, new method.  Restore stuff that's not expected to get saved into
	   the 'first-level' reg save area, then just fall through to setting
	   up the registers and calling the second-level handler. */

	/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved.  So we must restore
	   r25,tr1-4 to get into the right state.
	   NOTE(review): the original comment also said "and save r6", but r6
	   was already saved by tlb_miss above and nothing is stored here --
	   the comment appears stale, not the code. */

	ld.q	SP, TLB_SAVED_T1, r3
	ld.q	SP, TLB_SAVED_T2, r4
	ld.q	SP, TLB_SAVED_T3, r5
	ld.q	SP, TLB_SAVED_T4, r18
	ld.q	SP, TLB_SAVED_R25, r25

	ld.q	SP, TLB_SAVED_R0, r0
	ld.q	SP, TLB_SAVED_R1, r1

	ptabs/u	r3, tr1
	ptabs/u	r4, tr2
	ptabs/u	r5, tr3
	ptabs/u	r18, tr4

#ifdef CONFIG_SH64_PAGE_TABLE_AUDIT
	/* This use of DCR is not really legal, but there's nowhere else convenient to store this and it's only for last-resort debug anyway. */
	movi    0x400, r3
	putcon  r3, dcr
#endif

	/* Set args for Non-debug, TLB miss class handler */
	getcon	EXPEVT, r2		/* r2 = event code */
	_loada	ret_from_exception, r3	/* r3 = return address for 3rd level */
	ori	r3, 1, r3		/* force SHmedia mode bit */
	movi	EVENT_FAULT_TLB, r4	/* r4 = event class */
	or	SP, ZERO, r5		/* r5 = reg_save_area pointer */
	getcon	KCR1, SP		/* restore the original SP */
	_ptar	handle_exception, t0
	blink	t0, ZERO
794
/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
   DOES END UP AT VBR+0x600 */
	nop
	nop
	nop
	nop
	nop
	nop
#if 0
	.balign 256
	nop
#endif
	.balign 256
	/* VBR + 0x600 */

interrupt:
	/* First-level handler for external interrupts (INTEVT). */
	/* Save original stack pointer into KCR1 */
	putcon	SP, KCR1

	/* Save other original registers into reg_save_area */
        _loada  reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	t0, r3
	st.q	SP, SAVED_T0, r3

#ifdef CONFIG_SH64_PAGE_TABLE_AUDIT
	/* This use of DCR is not really legal, but there's nowhere else convenient to store this and it's only for last-resort debug anyway. */
	movi    0x600, r3
	putcon  r3, dcr
#endif

	/* Set args for interrupt class handler */
	getcon	INTEVT, r2		/* r2 = interrupt event code */
	_loada	ret_from_irq, r3	/* r3 = return address for 3rd level */
	ori	r3, 1, r3		/* force SHmedia mode bit */
	movi	EVENT_INTERRUPT, r4	/* r4 = event class */
	or	SP, ZERO, r5		/* r5 = reg_save_area pointer */
	getcon	KCR1, SP		/* restore the original SP */
	_ptar	handle_exception, t0
	blink	t0, ZERO
	.balign	TEXT_SIZE		/* let's waste the bare minimum */

LVBR_block_end:				/* Marker. Used for total checking */
843
844
/*
 * Second level handler for VBR-based exceptions. Pre-handler.
 * In common to all stack-frame sensitive handlers.
 *
 * Inputs:
 * (KCR0) Current [current task union]
 * (KCR1) Original SP
 * (r2)   INTEVT/EXPEVT
 * (r3)   appropriate return address
 * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
 * (r5)   Pointer to reg_save_area
 * (SP)   Original SP
 *
 * Available registers:
 * (r6)
 * (r18)
 * (t0)
 *
 */
handle_exception:
	/* Common 2nd level handler.  Builds the full pt_regs frame on an
	 * appropriate stack, re-enables exceptions, and dispatches through
	 * trap_jtable to the 3rd level handler. */

	/* First thing we need an appropriate stack pointer */
	getcon	SSR, r6
	shlri	r6, 30, r6
	andi	r6, 1, r6		/* r6 = SSR.MD (1 = was in kernel mode) */
	_ptar	stack_ok, t0
	bne	r6, ZERO, t0		/* Original stack pointer is fine */

	/* Set stack pointer for user fault: top of current's kernel stack */
	getcon	KCR0, SP
	movi	THREAD_SIZE, r6		/* Point to the end */
	add	SP, r6, SP

stack_ok:
	/* Make some room for the BASIC frame. */
	movi	-(FRAME_SIZE), r6
	add	SP, r6, SP

/* Could do this with no stalling if we had another spare register, but the
   code below will be OK.  Loads from reg_save_area (r5) are interleaved
   with stores into the frame to hide load latency. */
	ld.q	r5, SAVED_R2, r6
	ld.q	r5, SAVED_R3, r18
	st.q	SP, FRAME_R(2), r6
	ld.q	r5, SAVED_R4, r6
	st.q	SP, FRAME_R(3), r18
	ld.q	r5, SAVED_R5, r18
	st.q	SP, FRAME_R(4), r6
	ld.q	r5, SAVED_R6, r6
	st.q	SP, FRAME_R(5), r18
	ld.q	r5, SAVED_R18, r18
	st.q	SP, FRAME_R(6), r6
	ld.q	r5, SAVED_T0, r6
	st.q	SP, FRAME_R(18), r18
	st.q	SP, FRAME_T(0), r6

	/* Keep old SP around */
	getcon	KCR1, r6

	/* Save the rest of the general purpose registers */
	st.q	SP, FRAME_R(0), r0
	st.q	SP, FRAME_R(1), r1
	st.q	SP, FRAME_R(7), r7
	st.q	SP, FRAME_R(8), r8
	st.q	SP, FRAME_R(9), r9
	st.q	SP, FRAME_R(10), r10
	st.q	SP, FRAME_R(11), r11
	st.q	SP, FRAME_R(12), r12
	st.q	SP, FRAME_R(13), r13
	st.q	SP, FRAME_R(14), r14

	/* SP is somewhere else */
	st.q	SP, FRAME_R(15), r6

	st.q	SP, FRAME_R(16), r16
	st.q	SP, FRAME_R(17), r17
	/* r18 is saved earlier. */
	st.q	SP, FRAME_R(19), r19
	st.q	SP, FRAME_R(20), r20
	st.q	SP, FRAME_R(21), r21
	st.q	SP, FRAME_R(22), r22
	st.q	SP, FRAME_R(23), r23
	st.q	SP, FRAME_R(24), r24
	st.q	SP, FRAME_R(25), r25
	st.q	SP, FRAME_R(26), r26
	st.q	SP, FRAME_R(27), r27
	st.q	SP, FRAME_R(28), r28
	st.q	SP, FRAME_R(29), r29
	st.q	SP, FRAME_R(30), r30
	st.q	SP, FRAME_R(31), r31
	st.q	SP, FRAME_R(32), r32
	st.q	SP, FRAME_R(33), r33
	st.q	SP, FRAME_R(34), r34
	st.q	SP, FRAME_R(35), r35
	st.q	SP, FRAME_R(36), r36
	st.q	SP, FRAME_R(37), r37
	st.q	SP, FRAME_R(38), r38
	st.q	SP, FRAME_R(39), r39
	st.q	SP, FRAME_R(40), r40
	st.q	SP, FRAME_R(41), r41
	st.q	SP, FRAME_R(42), r42
	st.q	SP, FRAME_R(43), r43
	st.q	SP, FRAME_R(44), r44
	st.q	SP, FRAME_R(45), r45
	st.q	SP, FRAME_R(46), r46
	st.q	SP, FRAME_R(47), r47
	st.q	SP, FRAME_R(48), r48
	st.q	SP, FRAME_R(49), r49
	st.q	SP, FRAME_R(50), r50
	st.q	SP, FRAME_R(51), r51
	st.q	SP, FRAME_R(52), r52
	st.q	SP, FRAME_R(53), r53
	st.q	SP, FRAME_R(54), r54
	st.q	SP, FRAME_R(55), r55
	st.q	SP, FRAME_R(56), r56
	st.q	SP, FRAME_R(57), r57
	st.q	SP, FRAME_R(58), r58
	st.q	SP, FRAME_R(59), r59
	st.q	SP, FRAME_R(60), r60
	st.q	SP, FRAME_R(61), r61
	st.q	SP, FRAME_R(62), r62

	/*
	 * Save the S* registers.
	 */
	getcon	SSR, r61
	st.q	SP, FRAME_S(FSSR), r61
	getcon	SPC, r62
	st.q	SP, FRAME_S(FSPC), r62
	movi	-1, r62			/* Reset syscall_nr */
	st.q	SP, FRAME_S(FSYSCALL_ID), r62

	/* Save the rest of the target registers */
	gettr	t1, r6
	st.q	SP, FRAME_T(1), r6
	gettr	t2, r6
	st.q	SP, FRAME_T(2), r6
	gettr	t3, r6
	st.q	SP, FRAME_T(3), r6
	gettr	t4, r6
	st.q	SP, FRAME_T(4), r6
	gettr	t5, r6
	st.q	SP, FRAME_T(5), r6
	gettr	t6, r6
	st.q	SP, FRAME_T(6), r6
	gettr	t7, r6
	st.q	SP, FRAME_T(7), r6

/*#define POOR_MANS_STRACE 1*/

#ifdef POOR_MANS_STRACE
	/* We've pushed all the registers now, so only r2-r4 hold anything
	 * useful. Move them into callee save registers */
	or	r2, ZERO, r28
	or	r3, ZERO, r29
	or	r4, ZERO, r30

	/* Preserve r2 as the event code */
	_loada	evt_debug, r3
	ori	r3, 1, r3
	ptabs	r3, t0

	/* or	SP, ZERO, r5 */
	getcon	TRA, r5
	blink	t0, LINK

	or	r28, ZERO, r2
	or	r29, ZERO, r3
	or	r30, ZERO, r4
#endif


	/* For syscall and debug race condition, get TRA now */
	getcon	TRA, r5

	/* We are in a safe position to turn SR.BL off, but set IMASK=0xf
	 * Also set FD, to catch FPU usage in the kernel.
	 *
	 * benedict.gaster@superh.com 29/07/2002
	 *
	 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
	 * same time change BL from 1->0, as any pending interrupt of a level
	 * higher than the previous value of IMASK will leak through and be
	 * taken unexpectedly.
	 *
	 * To avoid this we raise the IMASK and then issue another PUTCON to
	 * enable interrupts.
         */
	getcon	SR, r6
	movi	SR_IMASK | SR_FD, r7	/* first PUTCON: mask all, disable FPU */
	or	r6, r7, r6
	putcon	r6, SR
	movi	SR_UNBLOCK_EXC, r7	/* second PUTCON: drop BL */
	and	r6, r7, r6
	putcon	r6, SR


	/* Now call the appropriate 3rd level handler */
	or	r3, ZERO, LINK		/* handler will return via LINK */
	_loada	trap_jtable, r3
	shlri	r2, 3, r2		/* code/0x20 * 4 = byte offset into .long table */
	ldx.l	r2, r3, r3		/* fetch handler address */
	shlri	r2, 2, r2		/* r2 = entry number (code/0x20) for the handler */
	ptabs	r3, t0
	or	SP, ZERO, r3		/* r3 = struct pt_regs * */
	blink	t0, ZERO
1051
/*
 * Second level handler for VBR-based exceptions. Post-handlers.
 *
 * Post-handlers for interrupts (ret_from_irq), exceptions
 * (ret_from_exception) and common reentrance doors (restore_all
 * to get back to the original context, ret_from_syscall loop to
 * check kernel exiting).
 *
 * ret_with_reschedule and check_signals are inner labels of
 * the ret_from_syscall loop.
 *
 * In common to all stack-frame sensitive handlers.
 *
 * Inputs:
 * (SP)   struct pt_regs *, original register's frame pointer (basic)
 *
 */
ret_from_irq:
	ld.q	SP, FRAME_S(FSSR), r6
	shlri	r6, 30, r6
	andi	r6, 1, r6		/* r6 = saved SSR.MD */
	_ptar	restore_all, t0
	bne	r6, ZERO, t0		/* returning to kernel: no further checks */
	STI()				/* re-enable interrupts (clobbers r6) */
	_ptar	ret_with_reschedule, t0
	blink	t0, ZERO		/* Do not check softirqs */

ret_from_exception:
	ld.q	SP, FRAME_S(FSSR), r6
	shlri	r6, 30, r6
	andi	r6, 1, r6		/* r6 = saved SSR.MD */
	_ptar	restore_all, t0
	bne	r6, ZERO, t0		/* returning to kernel: no further checks */

	/* Check softirqs */

	/*
	 * Fall-through:
	 * _ptar   ret_from_syscall, t0
	 * blink   t0, ZERO
	 */

ret_from_syscall:

ret_with_reschedule:
	getcon	KCR0, r6		/* r6 = current task */
	ld.l	r6, need_resched, r7
	_ptar	check_signals, t0
	beq	r7, ZERO, t0		/* no resched pending */

	_ptar	ret_from_syscall, t0
	gettr	t0, LINK		/* schedule() returns back into the loop */
	_loada	schedule, r6
	ptabs	r6, t0
	blink	t0, ZERO		/* Call schedule(), return on top */

check_signals:
	getcon	KCR0, r6		/* r6 = current task */
	ld.l	r6, sigpending, r7
	_ptar	restore_all, t0
	beq	r7, ZERO, t0		/* nothing pending: go restore */

	_loada	do_signal, r6
	ptabs	r6, t0
	or	SP, ZERO, r2		/* arg1: regs */
	or	ZERO, ZERO, r3		/* arg2: 0 (no saved sigmask) */
	blink	t0, LINK	    /* Call do_signal(regs, 0), return here */

#ifdef CONFIG_SH64_PAGE_TABLE_AUDIT
	/* Check page tables before returning (with obvious performance penalty). */
	_loada  audit_mm, r6
	ptabs   r6, t0
	getcon  expevt, r2
	getcon  intevt, r3
	getcon  tra, r4
	getcon  dcr, r5
	blink   t0, LINK
#endif
1130
restore_all:
	/* Unwind the whole pt_regs frame and rte back to the saved context.
	 * Order matters: targets first, then GPRs; r59-r62 are used as
	 * scratch for the SR/SSR/SPC juggling and so are restored last,
	 * and SP itself is the very last load. */

	/* Do prefetches */

	ld.q	SP, FRAME_T(0), r6
	ld.q	SP, FRAME_T(1), r7
	ld.q	SP, FRAME_T(2), r8
	ld.q	SP, FRAME_T(3), r9
	ptabs	r6, t0
	ptabs	r7, t1
	ptabs	r8, t2
	ptabs	r9, t3
	ld.q	SP, FRAME_T(4), r6
	ld.q	SP, FRAME_T(5), r7
	ld.q	SP, FRAME_T(6), r8
	ld.q	SP, FRAME_T(7), r9
	ptabs	r6, t4
	ptabs	r7, t5
	ptabs	r8, t6
	ptabs	r9, t7

	ld.q	SP, FRAME_R(0), r0
	ld.q	SP, FRAME_R(1), r1
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ld.q	SP, FRAME_R(4), r4
	ld.q	SP, FRAME_R(5), r5
	ld.q	SP, FRAME_R(6), r6
	ld.q	SP, FRAME_R(7), r7
	ld.q	SP, FRAME_R(8), r8
	ld.q	SP, FRAME_R(9), r9
	ld.q	SP, FRAME_R(10), r10
	ld.q	SP, FRAME_R(11), r11
	ld.q	SP, FRAME_R(12), r12
	ld.q	SP, FRAME_R(13), r13
	ld.q	SP, FRAME_R(14), r14

	ld.q	SP, FRAME_R(16), r16
	ld.q	SP, FRAME_R(17), r17
	ld.q	SP, FRAME_R(18), r18
	ld.q	SP, FRAME_R(19), r19
	ld.q	SP, FRAME_R(20), r20
	ld.q	SP, FRAME_R(21), r21
	ld.q	SP, FRAME_R(22), r22
	ld.q	SP, FRAME_R(23), r23
	ld.q	SP, FRAME_R(24), r24
	ld.q	SP, FRAME_R(25), r25
	ld.q	SP, FRAME_R(26), r26
	ld.q	SP, FRAME_R(27), r27
	ld.q	SP, FRAME_R(28), r28
	ld.q	SP, FRAME_R(29), r29
	ld.q	SP, FRAME_R(30), r30
	ld.q	SP, FRAME_R(31), r31
	ld.q	SP, FRAME_R(32), r32
	ld.q	SP, FRAME_R(33), r33
	ld.q	SP, FRAME_R(34), r34
	ld.q	SP, FRAME_R(35), r35
	ld.q	SP, FRAME_R(36), r36
	ld.q	SP, FRAME_R(37), r37
	ld.q	SP, FRAME_R(38), r38
	ld.q	SP, FRAME_R(39), r39
	ld.q	SP, FRAME_R(40), r40
	ld.q	SP, FRAME_R(41), r41
	ld.q	SP, FRAME_R(42), r42
	ld.q	SP, FRAME_R(43), r43
	ld.q	SP, FRAME_R(44), r44
	ld.q	SP, FRAME_R(45), r45
	ld.q	SP, FRAME_R(46), r46
	ld.q	SP, FRAME_R(47), r47
	ld.q	SP, FRAME_R(48), r48
	ld.q	SP, FRAME_R(49), r49
	ld.q	SP, FRAME_R(50), r50
	ld.q	SP, FRAME_R(51), r51
	ld.q	SP, FRAME_R(52), r52
	ld.q	SP, FRAME_R(53), r53
	ld.q	SP, FRAME_R(54), r54
	ld.q	SP, FRAME_R(55), r55
	ld.q	SP, FRAME_R(56), r56
	ld.q	SP, FRAME_R(57), r57
	ld.q	SP, FRAME_R(58), r58

	getcon	SR, r59
	movi	SR_BLOCK_EXC, r60
	or	r59, r60, r59
	putcon	r59, SR			/* SR.BL = 1, keep nesting out */
	ld.q	SP, FRAME_S(FSSR), r61
	ld.q	SP, FRAME_S(FSPC), r62
	movi	SR_ASID_MASK, r60
	and	r59, r60, r59		/* r59 = current ASID */
	andc	r61, r60, r61		/* Clear out older ASID */
	or	r59, r61, r61		/* Retain current ASID */
	putcon	r61, SSR
	putcon	r62, SPC

	/* Ignore FSYSCALL_ID */

	ld.q	SP, FRAME_R(59), r59
	ld.q	SP, FRAME_R(60), r60
	ld.q	SP, FRAME_R(61), r61
	ld.q	SP, FRAME_R(62), r62

	/* Last touch */
	ld.q	SP, FRAME_R(15), SP
	rte
	nop
1235
1236/*
1237 * Third level handlers for VBR-based exceptions. Adapting args to
1238 * and/or deflecting to fourth level handlers.
1239 *
1240 * Fourth level handlers interface.
1241 * Most are C-coded handlers directly pointed by the trap_jtable.
1242 * (Third = Fourth level)
1243 * Inputs:
1244 * (r2)   fault/interrupt code, entry number (e.g. NMI = 14,
1245 *	  IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1246 * (r3)   struct pt_regs *, original register's frame pointer
1247 * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1248 * (r5)   TRA control register (for syscall/debug benefit only)
1249 * (LINK) return address
1250 * (SP)   = r3
1251 *
1252 * Kernel TLB fault handlers will get a slightly different interface.
1253 * (r2)   struct pt_regs *, original register's frame pointer
1254 * (r3)   writeaccess, whether it's a store fault as opposed to load fault
1255 * (r4)   execaccess, whether it's a ITLB fault as opposed to DTLB fault
1256 * (r5)   Effective Address of fault
1257 * (LINK) return address
1258 * (SP)   = r2
1259 *
1260 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1261 *
1262 */
tlb_miss_load:
	/* DTLB miss on a load: set up do_page_fault(regs, write=0,
	   exec=0, address) and branch to the common call site. */
	or	SP, ZERO, r2
	or	ZERO, ZERO, r3		/* Read */
	or	ZERO, ZERO, r4		/* Data */
	getcon	TEA, r5			/* faulting effective address */
	_ptar	call_do_page_fault, t0
	beq	ZERO, ZERO, tr0		/* unconditional; NOTE(review): tr0 vs t0 naming mixed here -- presumably aliases, confirm registers.h */
1270
tlb_miss_store:
	/* DTLB miss on a store: do_page_fault(regs, write=1, exec=0, address). */
	or	SP, ZERO, r2
	movi	1, r3			/* Write */
	or	ZERO, ZERO, r4		/* Data */
	getcon	TEA, r5			/* faulting effective address */
	_ptar	call_do_page_fault, t0
	beq	ZERO, ZERO, tr0		/* unconditional; NOTE(review): tr0 vs t0 naming mixed -- presumably aliases */
1278
itlb_miss_or_IRQ:
	/* Shared vector: deflect to do_IRQ if this is an interrupt event,
	   otherwise treat it as an ITLB (instruction fetch) fault. */
	_ptar	its_IRQ, t0
	beqi/u	r4, EVENT_INTERRUPT, t0
	or	SP, ZERO, r2
	or	ZERO, ZERO, r3		/* Read */
	movi	1, r4			/* Text */
	getcon	TEA, r5			/* faulting effective address */
	/* Fall through */
1287
call_do_page_fault:
	/* Tail-call do_page_fault(); LINK is still the caller's return
	   address, so do_page_fault returns directly to it. */
	_loada	do_page_fault, r6
        ptabs	r6, t0
        blink	t0, ZERO
1292
fpu_error_or_IRQA:
	/* Shared vector: interrupt -> do_IRQ; otherwise an FPU-disabled
	   trap -> restore FPU state (or report an error when the kernel
	   is built without FPU support).
	   NOTE(review): identical to fpu_error_or_IRQB below; kept as two
	   entry points for the two vector slots. */
	_ptar	its_IRQ, t0
	beqi/l	r4, EVENT_INTERRUPT, t0
#ifndef CONFIG_NOFPU_SUPPORT
	_loada	do_fpu_state_restore, r6
#else
	_loada	do_exception_error, r6
#endif
	ptabs	r6, t0
	blink	t0, ZERO
1303
fpu_error_or_IRQB:
	/* Second entry point with the same behaviour as fpu_error_or_IRQA
	   (see above): interrupt -> do_IRQ, else FPU state restore/error. */
	_ptar	its_IRQ, t0
	beqi/l	r4, EVENT_INTERRUPT, t0
#ifndef CONFIG_NOFPU_SUPPORT
	_loada	do_fpu_state_restore, r6
#else
	_loada	do_exception_error, r6
#endif
	ptabs	r6, t0
	blink	t0, ZERO
1314
its_IRQ:
	/* Tail-call do_IRQ(); returns to the original LINK. */
	_loada	do_IRQ, r6
	ptabs	r6, t0
	blink	t0, ZERO
1319
1320/*
1321 * system_call/unknown_trap third level handler:
1322 *
1323 * Inputs:
1324 * (r2)   fault/interrupt code, entry number (TRAP = 11)
1325 * (r3)   struct pt_regs *, original register's frame pointer
1326 * (r4)   Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1327 * (r5)   TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1328 * (SP)   = r3
1329 * (LINK) return address: ret_from_exception
1330 * (*r3)  Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1331 *
1332 * Outputs:
1333 * (*r3)  Syscall reply (Saved r2)
1334 * (LINK) In case of syscall only it can be scrapped.
1335 *        Common second level post handler will be ret_from_syscall.
1336 *        Common (non-trace) exit point to that is syscall_ret (saving
1337 *        result to r2). Common bad exit point is syscall_bad (returning
1338 *        ENOSYS then saved to r2).
1339 *
1340 */
1341
unknown_trap:
	/* Unknown Trap or User Trace */
	/* Call do_unknown_trapa(syscall#) then exit via syscall_ret. */
	_loada	do_unknown_trapa, r6
	ptabs	r6, t0
        ld.q    r3, FRAME_R(9), r2	/* r2 = #arg << 16 | syscall # */
        andi    r2, 0x1ff, r2		/* r2 = syscall # */
	blink	t0, LINK

	/* Return value of do_unknown_trapa (in r2) is reported via syscall_ret */
	_ptar	syscall_ret, t0
	blink	t0, ZERO
1352
1353        /* New syscall implementation*/
system_call:
	/* TRA is 0x00xyzzzz with x=1 for a syscall (see header comment).
	   Anything else is routed to unknown_trap. */
	_ptar	unknown_trap, t0
        or      r5, ZERO, r4            /* TRA (=r5) -> r4 */
        shlri   r4, 20, r4
	bnei	r4, 1, t0		/* unknown_trap if not 0x1yzzzz */

        /* It's a system call */
	st.q    r3, FRAME_S(FSYSCALL_ID), r5 	/* ID (0x1yzzzz) -> stack */
	andi    r5, 0x1ff, r5			/* syscall # -> r5	  */

	STI()					/* re-enable interrupts */

	/* Range-check the syscall number; out of range falls through
	   into syscall_bad (-ENOSYS). */
	_ptar	syscall_allowed, t0
	movi	NR_syscalls - 1, r4	/* Last valid */
	bgeu/l	r4, r5, t0
1369
syscall_bad:
	/* Return ENOSYS ! */
	movi	-(ENOSYS), r2		/* Fall-through */
syscall_ret:
	/* Common (non-trace) syscall exit: store the result into the
	   saved r9 slot, advance SPC past the trapa, and resume through
	   ret_from_syscall. */
	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */

#ifdef POOR_MANS_STRACE
	/* nothing useful in registers at this point */

	_loada	evt_debug2, r5
	ori	r5, 1, r5
	ptabs	r5, t0
	ld.q	SP, FRAME_R(9), r2
	or	SP, ZERO, r3
	blink	t0, LINK
#endif

	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	_ptar	ret_from_syscall, t0
	blink	t0, ZERO
1392
1393
/*  A different return path for ret_from_fork, because we now need
 *  to call schedule_tail with the later kernels. Since switch_to()
 *  has already loaded prev into r2, we can call it straight away.
 */
1398
.global	ret_from_fork
ret_from_fork:

	/* Call schedule_tail(prev); prev is already in r2 courtesy of
	   switch_to().  The rest mirrors the syscall_ret exit sequence. */
	_loada	schedule_tail,r5
	ori	r5, 1, r5
	ptabs	r5, t0
	blink	t0, LINK

#ifdef POOR_MANS_STRACE
	/* nothing useful in registers at this point */

	_loada	evt_debug2, r5
	ori	r5, 1, r5
	ptabs	r5, t0
	ld.q	SP, FRAME_R(9), r2
	or	SP, ZERO, r3
	blink	t0, LINK
#endif

	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	_ptar	ret_from_syscall, t0
	blink	t0, ZERO
1423
1424
1425
syscall_allowed:
	/* Use LINK to deflect the exit point, default is syscall_ret */
	_ptar	syscall_ret, t0
	gettr	t0, LINK
	_ptar	syscall_notrace, t0

	/* Skip tracing unless PT_TRACESYS is set in current->ptrace. */
	getcon	KCR0, r2		/* r2 = current task struct */
	ld.l	r2, ptrace, r4
	andi	r4, PT_TRACESYS, r4
	beq/l	r4, ZERO, t0

	/* Trace it by calling syscall_trace before and after */
	_loada	syscall_trace, r4
	ptabs	r4, t0
	blink	t0, LINK
	/* Reload syscall number as r5 is trashed by syscall_trace */
	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
	andi	r5, 0x1ff, r5

	/* Traced syscalls exit through syscall_ret_trace instead. */
	_ptar	syscall_ret_trace, t0
	gettr	t0, LINK
1447
syscall_notrace:
	/* Now point to the appropriate 4th level syscall handler */
	_loada	sys_call_table, r4
	shlli	r5, 2, r5		/* table entries are 4 bytes (.long) */
	ldx.l	r4, r5, r5
	ptabs	r5, t0

	/* Prepare original args */
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ld.q	SP, FRAME_R(4), r4
	ld.q	SP, FRAME_R(5), r5
	ld.q	SP, FRAME_R(6), r6
	ld.q	SP, FRAME_R(7), r7

	/* And now the trick for those syscalls requiring regs * ! */
	or	SP, ZERO, r8

	/* Call it */
	blink	t0, ZERO	/* LINK is already properly set */
1468
syscall_ret_trace:
	/* We get back here only if under trace */
	st.q	SP, FRAME_R(9), r2	/* Save return value */

	/* Call syscall_trace a second time (syscall exit).
	   ... usage of a pt relative (_ptar _syscall_trace) fails on CDC */
	_loada	syscall_trace, LINK
	ptabs	LINK, t0
	blink	t0, LINK

	/* This needs to be done after any syscall tracing */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2	/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2

	_ptar	ret_from_syscall, t0
	blink	t0, ZERO		/* Resume normal return sequence */
1485
1486/*
1487 * --- Switch to running under a particular ASID and return the previous ASID value
1488 * --- The caller is assumed to have done a cli before calling this.
1489 *
1490 * Input r2 : new ASID
1491 * Output r2 : old ASID
1492 */
1493
.global switch_and_save_asid
switch_and_save_asid:
	/* Install the new ASID (r2) into SR via the SSR/SPC + rte trick
	   (putcon to SR directly cannot change the ASID field safely);
	   return the previous ASID in r2.  Caller must have IRQs off. */
	getcon	sr, r0
	movi	255, r4
	shlli 	r4, 16, r4	/* r4 = mask to select ASID */
	and	r0, r4, r3	/* r3 = shifted old ASID */
	andi	r2, 255, r2	/* mask down new ASID */
	shlli	r2, 16, r2	/* align new ASID against SR.ASID */
	andc	r0, r4, r0	/* efface old ASID from SR */
	or	r0, r2, r0	/* insert the new ASID */
	putcon	r0, ssr
	_loada	1f, r0
	putcon	r0, spc
	rte			/* effectively "putcon r0, sr" + jump to 1f */
	nop
1:
	ptabs	r18, tr0
	shlri	r3, 16, r2	/* r2 = old ASID */
	blink tr0, r63		/* return to caller (r18 = LINK) */
1513
.global	route_to_panic_handler
route_to_panic_handler:
	/* Switch to real mode, goto panic_handler, don't return.  Useful for
	   last-chance debugging, e.g. if no output wants to go to the console.
	   */

	_loada	panic_handler - CONFIG_CACHED_MEMORY_OFFSET, r1 /* physical addr */
	ptabs	r1, tr0
	pta	1f, tr1
	gettr	tr1, r0
	putcon	r0, spc
	getcon	sr, r0
	movi	1, r1
	shlli	r1, 31, r1	/* r1 = SR.MMU bit */
	andc	r0, r1, r0	/* clear MMU bit -> real mode after rte */
	putcon	r0, ssr
	rte			/* drop to real mode at 1f below */
	nop
1:	/* Now in real mode */
	blink tr0, r63		/* jump to panic_handler, never returns */
	nop
1535
.global peek_real_address_q
peek_real_address_q:
	/* Two args:
	   r2 : real mode address to peek
	   r2(out) : result quadword

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to onchip_remap the debug
	   module, and to avoid the need to onchip_remap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1	/* r1 = SR.BL bit */
	or	r0, r1, r1	/* r1 = r0 with block bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36	/* r36 = SR.MMU bit */
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	_loada	.peek0 - CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
	_loada	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte			/* enter .peek0 in real mode, still blocked */
	nop

.peek0:	/* come here in real mode, don't touch caches!!
           still in critical section (sr.bl==1) */
	putcon	r0, ssr	/* restore original SR on the way back */
	putcon	r37, spc
	/* Here's the actual peek.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	ld.q	r2, 0, r2
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	r18, tr0
	blink	tr0, r63	/* return to caller, result in r2 */
1584
.global poke_real_address_q
poke_real_address_q:
	/* Two args:
	   r2 : real mode address to poke
	   r3 : quadword value to write.

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to onchip_remap the debug
	   module, and to avoid the need to onchip_remap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1	/* r1 = SR.BL bit */
	or	r0, r1, r1	/* r1 = r0 with block bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36	/* r36 = SR.MMU bit */
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	_loada	.poke0-CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
	_loada	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte			/* enter .poke0 in real mode, still blocked */
	nop

.poke0:	/* come here in real mode, don't touch caches!!
           still in critical section (sr.bl==1) */
	putcon	r0, ssr	/* restore original SR on the way back */
	putcon	r37, spc
	/* Here's the actual poke.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	st.q	r2, 0, r3
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	r18, tr0
	blink	tr0, r63	/* return to caller */
1633
1634/*
1635 * --- User Access Handling Section
1636 */
1637
1638/*
1639 * User Access support. It all moved to non inlined Assembler
1640 * functions in here.
1641 *
1642 * __kernel_size_t __copy_user(void *__to, const void *__from,
1643 *			       __kernel_size_t __n)
1644 *
1645 * Inputs:
1646 * (r2)  target address
1647 * (r3)  source address
1648 * (r4)  size in bytes
1649 *
 * Outputs:
1651 * (*r2) target data
1652 * (r2)  non-copied bytes
1653 *
1654 * If a fault occurs on the user pointer, bail out early and return the
1655 * number of bytes not copied in r2.
1656 * Strategy : for large blocks, call a real memcpy function which can
1657 * move >1 byte at a time using unaligned ld/st instructions, and can
1658 * manipulate the cache using prefetch + alloco to improve the speed
1659 * further.  If a fault occurs in that function, just revert to the
1660 * byte-by-byte approach used for small blocks; this is rare so the
1661 * performance hit for that case does not matter.
1662 *
1663 * For small blocks it's not worth the overhead of setting up and calling
1664 * the memcpy routine; do the copy a byte at a time.
1665 *
1666 */
.global	__copy_user
__copy_user:
	/* __kernel_size_t __copy_user(void *to, const void *from, size_t n)
	   Small copies (<= 16 bytes) go byte-by-byte; larger ones call the
	   fast copy_user_memcpy, with the args stashed on the stack so
	   __copy_user_fixup can restart byte-by-byte after a fault. */
	_ptar	__copy_user_byte_by_byte, t1
	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
	bge/u	r0, r4, t1
	_ptar copy_user_memcpy, t0
	addi	r15, -32, r15
	/* Save arguments in case we have to fix-up unhandled page fault */
	st.q	r15, 0, r2
	st.q	r15, 8, r3
	st.q	r15, 16, r4
	st.q	r15, 24, r35 ! r35 is callee-save
	/* Save LINK in a register to reduce RTS time later (otherwise
	   ld r15,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
	ori	LINK, 0, r35
	blink	t0, LINK

	/* Copy completed normally if we get back here */
	ptabs	r35, tr0
	ld.q	r15, 24, r35
	/* don't restore r2-r4, pointless */
	/* set result=r2 to zero as the copy must have succeeded. */
	or	r63, r63, r2
	addi	r15, 32, r15
	blink	tr0, r63 ! RTS
1692
.global __copy_user_fixup
__copy_user_fixup:
	/* Fault fixup target for copy_user_memcpy: restore the original
	   arguments saved by __copy_user and retry byte-by-byte. */
	/* Restore stack frame */
	ori	r35, 0, LINK
	ld.q	r15, 24, r35
	ld.q	r15, 16, r4
	ld.q	r15,  8, r3
	ld.q	r15,  0, r2
	addi	r15, 32, r15
	/* Fall through to original code, in the 'same' state we entered with */
1703
/* The slow byte-by-byte method is used if the fast copy traps due to a bad
   user address.  In that rare case, the speed drop can be tolerated. */
__copy_user_byte_by_byte:
	/* r0 = (to - from - 1) so the store target is (from + r0), letting
	   the loop advance a single pointer (r3).  r4 counts bytes left;
	   on exit r2 = bytes NOT copied (0 on success). */
	_ptar	___copy_user_exit, t1
	_ptar	___copy_user1, t0
	beq/u	r4, r63, t1	/* early exit for zero length copy */
	sub	r2, r3, r0
	addi	r0, -1, r0

___copy_user1:
	ld.b	r3, 0, r5		/* Fault address 1 */

	/* Could rewrite this to use just 1 add, but the second comes 'free'
	   due to load latency */
	addi	r3, 1, r3
	addi	r4, -1, r4		/* No real fixup required */
___copy_user2:
	stx.b	r3, r0, r5		/* Fault address 2 */
	bne     r4, ZERO, t0

___copy_user_exit:
	or	r4, ZERO, r2	/* r2 = remaining (uncopied) byte count */
	ptabs	LINK, t0
	blink	t0, ZERO
1728
1729/*
1730 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1731 *
1732 * Inputs:
1733 * (r2)  target address
1734 * (r3)  size in bytes
1735 *
 * Outputs:
1737 * (*r2) zero-ed target data
1738 * (r2)  non-zero-ed bytes
1739 */
.global	__clear_user
__clear_user:
	/* __kernel_size_t __clear_user(void *addr, size_t size)
	   Zero user memory a byte at a time; a fault exits via the
	   __ex_table entry below, returning the bytes left in r2. */
	_ptar	___clear_user_exit, t1
	_ptar	___clear_user1, t0
	beq/u	r3, r63, t1	/* early exit for zero length */

___clear_user1:
	st.b	r2, 0, ZERO		/* Fault address */
	addi	r2, 1, r2
	addi	r3, -1, r3		/* No real fixup required */
	bne     r3, ZERO, t0

___clear_user_exit:
	or	r3, ZERO, r2	/* r2 = bytes NOT cleared (0 on success) */
	ptabs	LINK, t0
	blink	t0, ZERO
1756
1757
1758/*
1759 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1760 *			   int __count)
1761 *
1762 * Inputs:
1763 * (r2)  target address
1764 * (r3)  source address
1765 * (r4)  maximum size in bytes
1766 *
 * Outputs:
1768 * (*r2) copied data
1769 * (r2)  -EFAULT (in case of faulting)
1770 *       copied data (otherwise)
1771 */
.global	__strncpy_from_user
__strncpy_from_user:
	/* int __strncpy_from_user(dest, src, count)
	   Copy up to r4 bytes including the NUL; on a user fault the
	   preset -EFAULT in r6 is returned, otherwise the number of
	   bytes copied (excluding the terminator count adjustment). */
	_ptar	___strncpy_from_user1, t0
	_ptar	___strncpy_from_user_done, t1
	or	r4, ZERO, r5		/* r5 = original count */
	beq/u	r4, r63, t1		/* early exit if r4==0 */
	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */

___strncpy_from_user1:
	ld.b	r3, 0, r7		/* Fault address: only in reading */
	st.b	r2, 0, r7
	addi	r2, 1, r2
	addi	r3, 1, r3
	beq/u	ZERO, r7, t1		/* stop at the NUL terminator */
	addi	r4, -1, r4		/* return real number of copied bytes */
	bne/l	ZERO, r4, t0

___strncpy_from_user_done:
	sub	r5, r4, r6		/* If done, return copied */

___strncpy_from_user_exit:
	or	r6, ZERO, r2
	ptabs	LINK, t0
	blink	t0, ZERO
1797
1798/*
1799 * extern long __strnlen_user(const char *__s, long __n)
1800 *
1801 * Inputs:
1802 * (r2)  source address
1803 * (r3)  source size in bytes
1804 *
 * Outputs:
1806 * (r2)  -EFAULT (in case of faulting)
1807 *       string length (otherwise)
1808 */
.global	__strnlen_user
__strnlen_user:
	/* long __strnlen_user(const char *s, long n)
	   Scan up to r3 bytes of user memory; returns the string length
	   in r2, or -EFAULT (preset in r6) if the read faults. */
	_ptar	___strnlen_user_set_reply, t0
	_ptar	___strnlen_user1, t1
	or	ZERO, ZERO, r5		/* r5 = counter */
	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
	beq	r3, ZERO, t0

___strnlen_user1:
	ldx.b	r2, r5, r7		/* Fault address: only in reading */
	addi	r3, -1, r3		/* No real fixup */
	addi	r5, 1, r5
	beq	r3, ZERO, t0		/* limit reached */
	bne	r7, ZERO, t1		/* not NUL yet: keep scanning */
! The line below used to be active.  It led to a junk byte lying between each pair
! of entries in the argv & envp structures in memory.  Whilst the program saw the right data
! via the argv and envp arguments to main, it meant the 'flat' representation visible through
! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
!	addi	r5, 1, r5		/* Include '\0' */

___strnlen_user_set_reply:
	or	r5, ZERO, r6		/* If done, return counter */

___strnlen_user_exit:
	or	r6, ZERO, r2
	ptabs	LINK, t0
	blink	t0, ZERO
1837
1838/*
1839 * extern long __get_user_asm_?(void *val, long addr)
1840 *
1841 * Inputs:
1842 * (r2)  dest address
1843 * (r3)  source address (in User Space)
1844 *
 * Outputs:
1846 * (r2)  -EFAULT (faulting)
1847 *       0 	 (not faulting)
1848 */
.global	__get_user_asm_b
__get_user_asm_b:
	/* long __get_user_asm_b(void *val, long addr): copy one byte from
	   user space (r3) to kernel (r2).  Returns 0, or the preset
	   -EFAULT if the user load faults (fixup via __ex_table). */
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_b1:
	ld.b	r3, 0, r5		/* r5 = data */
	st.b	r4, 0, r5
	or	ZERO, ZERO, r2		/* success */

___get_user_asm_b_exit:
	ptabs	LINK, t0
	blink	t0, ZERO
1862
1863
.global	__get_user_asm_w
__get_user_asm_w:
	/* Word (16-bit) variant of __get_user_asm_b; same contract. */
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_w1:
	ld.w	r3, 0, r5		/* r5 = data */
	st.w	r4, 0, r5
	or	ZERO, ZERO, r2		/* success */

___get_user_asm_w_exit:
	ptabs	LINK, t0
	blink	t0, ZERO
1877
1878
.global	__get_user_asm_l
__get_user_asm_l:
	/* Longword (32-bit) variant of __get_user_asm_b; same contract. */
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_l1:
	ld.l	r3, 0, r5		/* r5 = data */
	st.l	r4, 0, r5
	or	ZERO, ZERO, r2		/* success */

___get_user_asm_l_exit:
	ptabs	LINK, t0
	blink	t0, ZERO
1892
1893
.global	__get_user_asm_q
__get_user_asm_q:
	/* Quadword (64-bit) variant of __get_user_asm_b; same contract. */
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_q1:
	ld.q	r3, 0, r5		/* r5 = data */
	st.q	r4, 0, r5
	or	ZERO, ZERO, r2		/* success */

___get_user_asm_q_exit:
	ptabs	LINK, t0
	blink	t0, ZERO
1907
1908/*
1909 * extern long __put_user_asm_?(void *pval, long addr)
1910 *
1911 * Inputs:
1912 * (r2)  kernel pointer to value
1913 * (r3)  dest address (in User Space)
1914 *
 * Outputs:
1916 * (r2)  -EFAULT (faulting)
1917 *       0 	 (not faulting)
1918 */
.global	__put_user_asm_b
__put_user_asm_b:
	/* long __put_user_asm_b(void *pval, long addr): copy one byte from
	   kernel (*r2) to user space (r3).  Returns 0, or the preset
	   -EFAULT if the user store faults (fixup via __ex_table). */
	ld.b	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_b1:
	st.b	r3, 0, r4
	or	ZERO, ZERO, r2		/* success */

___put_user_asm_b_exit:
	ptabs	LINK, t0
	blink	t0, ZERO
1931
1932
.global	__put_user_asm_w
__put_user_asm_w:
	/* Word (16-bit) variant of __put_user_asm_b; same contract. */
	ld.w	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_w1:
	st.w	r3, 0, r4
	or	ZERO, ZERO, r2		/* success */

___put_user_asm_w_exit:
	ptabs	LINK, t0
	blink	t0, ZERO
1945
1946
.global	__put_user_asm_l
__put_user_asm_l:
	/* Longword (32-bit) variant of __put_user_asm_b; same contract. */
	ld.l	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_l1:
	st.l	r3, 0, r4
	or	ZERO, ZERO, r2		/* success */

___put_user_asm_l_exit:
	ptabs	LINK, t0
	blink	t0, ZERO
1959
1960
.global	__put_user_asm_q
__put_user_asm_q:
	/* Quadword (64-bit) variant of __put_user_asm_b; same contract. */
	ld.q	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_q1:
	st.q	r3, 0, r4
	or	ZERO, ZERO, r2		/* success */

___put_user_asm_q_exit:
	ptabs	LINK, t0
	blink	t0, ZERO
1973
1974
1975/*
1976 * --- Signal Handling Section
1977 */
1978
1979/*
1980 * extern long long _sa_default_rt_restorer
1981 * extern long long _sa_default_restorer
1982 *
1983 *		 or, better,
1984 *
1985 * extern void _sa_default_rt_restorer(void)
1986 * extern void _sa_default_restorer(void)
1987 *
1988 * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
1989 * from user space. Copied into user space by signal management.
1990 * Both must be quad aligned and 2 quad long (4 instructions).
1991 *
1992 */
.balign 8
	.global sa_default_rt_restorer
sa_default_rt_restorer:
	/* Default rt-signal trampoline, copied to user space: issues
	   trapa for __NR_rt_sigreturn.  Must stay quad-aligned and
	   exactly 2 quads (4 instructions) long. */
	movi	0x10, r9
	shori	__NR_rt_sigreturn, r9
	trapa	r9
	nop
2000
.balign 8
	.global sa_default_restorer
sa_default_restorer:
	/* Default signal trampoline, copied to user space: issues trapa
	   for __NR_sigreturn.  Must stay quad-aligned and exactly 2 quads
	   (4 instructions) long. */
	movi	0x10, r9
	shori	__NR_sigreturn, r9
	trapa	r9
	nop
2008
2009/*
2010 * --- __ex_table Section
2011 */
2012
2013/*
2014 * User Access Exception Table.
2015 */
.section	__ex_table,  "a"

	.global asm_uaccess_start	/* Just a marker */
asm_uaccess_start:

	/* Exception table: each entry pairs a potentially-faulting user
	   access instruction (label) with the fixup address at which the
	   page fault handler resumes execution. */
	.long	___copy_user1, ___copy_user_exit
	.long	___copy_user2, ___copy_user_exit
	.long	___clear_user1, ___clear_user_exit
	.long	___strncpy_from_user1, ___strncpy_from_user_exit
	.long	___strnlen_user1, ___strnlen_user_exit
	.long	___get_user_asm_b1, ___get_user_asm_b_exit
	.long	___get_user_asm_w1, ___get_user_asm_w_exit
	.long	___get_user_asm_l1, ___get_user_asm_l_exit
	.long	___get_user_asm_q1, ___get_user_asm_q_exit
	.long	___put_user_asm_b1, ___put_user_asm_b_exit
	.long	___put_user_asm_w1, ___put_user_asm_w_exit
	.long	___put_user_asm_l1, ___put_user_asm_l_exit
	.long	___put_user_asm_q1, ___put_user_asm_q_exit

	.global asm_uaccess_end		/* Just a marker */
asm_uaccess_end:
2037
2038
2039
2040
2041/*
2042 * --- .text.init Section
2043 */
2044
2045	.section	.text.init, "ax"
2046
2047/*
2048 * void trap_init (void)
2049 *
2050 */
.global	trap_init
trap_init:
	/* Install the exception vector bases: VBR -> LVBR_block (virtual),
	   RESVEC -> LRESVEC_block (physical, MMUOFF set so reset-class
	   exceptions run in real mode).  Then sanity-check the handler
	   block size and finally unblock exceptions (clear SR.BL). */
	addi	SP, -24, SP			/* Room to save r28/r29/r30 */
	st.q	SP, 0, r28
	st.q	SP, 8, r29
	st.q	SP, 16, r30

	/* Set VBR and RESVEC */
	_loada	LVBR_block, r19
	andi	r19, -4, r19			/* reset MMUOFF + reserved */
	/* For RESVEC exceptions we force the MMU off, which means we need the
	   physical address. */
	_loada	LRESVEC_block-CONFIG_CACHED_MEMORY_OFFSET, r20
	andi	r20, -4, r20			/* reset reserved */
	ori	r20, 1, r20			/* set MMUOFF */
	putcon	r19, VBR
	putcon	r20, RESVEC

	/* Sanity check */
	_loada	LVBR_block_end, r21
	andi	r21, -4, r21
	movi	BLOCK_SIZE, r29			/* r29 = expected size */
	or	r19, ZERO, r30
	add	r19, r29, r19			/* r19 = expected end of block */

	/*
	 * Ugly, but better loop forever now than crash afterwards.
	 * We should print a message, but if we touch LVBR or
	 * LRESVEC blocks we should not be surprised if we get stuck
	 * in trap_init().
	 */
	_ptar	trap_init_loop, t1
	gettr	t1, r28				/* r28 = trap_init_loop */
	sub	r21, r30, r30			/* r30 = actual size */

	/*
	 * VBR/RESVEC handlers overlap by being bigger than
	 * allowed. Very bad. Just loop forever.
	 * (r28) panic/loop address
	 * (r29) expected size
	 * (r30) actual size
	 */
trap_init_loop:
	bne	r19, r21, t1			/* size mismatch: spin here forever */

	/* Now that exception vectors are set up reset SR.BL */
	getcon 	SR, r22
	movi	SR_UNBLOCK_EXC, r23
	and	r22, r23, r22
	putcon	r22, SR

	addi	SP, 24, SP
	ptabs	LINK, t0
	blink	t0, ZERO
2105
2106