/*
 * ia64/kernel/entry.S
 *
 * Kernel entry points.
 *
 * Copyright (C) 2002-2003
 * 	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
 */
/*
 * ia64_switch_to now places the correct virtual mapping in TR2 for the
 * kernel stack. This allows us to handle interrupts without changing
 * to physical mode.
 *
 * Jonathan Nicklin	<nicklin@missioncriticallinux.com>
 * Patrick O'Rourke	<orourke@missioncriticallinux.com>
 * 11/07/2000
 */
/*
 * Global (preserved) predicate usage on syscall entry/exit path:
 *
 *	pKern:		See entry.h.
 *	pUser:		See entry.h.
 *	pSys:		See entry.h.
 *	pNonSys:	!pSys
 */

#include <linux/config.h>

#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/processor.h>
#include <asm/unistd.h>
#include <asm/asmmacro.h>
#include <asm/pgtable.h>

#include "minstate.h"

	/*
	 * execve() is special because in case of success, we need to
	 * set up a null register window frame.
	 */
ENTRY(ia64_execve)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,4,0
	/*
	 * Leave the kernel and restore all pt_regs to the corresponding registers.
	 * This is special because an ia32 application needs its scratch registers
	 * after returning from execve().
	 */
	movl loc0=ia64_ret_from_execve_syscall
	.body
	mov out0=in0			// filename
	;;				// stop bit between alloc and call
	mov out1=in1			// argv
	mov out2=in2			// envp
	add out3=16,sp			// regs
	br.call.sptk.many rp=sys_execve
.ret0:	cmp4.ge p6,p7=r8,r0
	mov ar.pfs=loc1			// restore ar.pfs
	sxt4 r8=r8			// return 64-bit result
	;;
	stf.spill [sp]=f0
(p6)	cmp.ne pKern,pUser=r0,r0	// a successful execve() lands us in user-mode...
	mov rp=loc0
(p6)	mov ar.pfs=r0			// clear ar.pfs on success
(p7)	br.ret.sptk.many rp

	/*
	 * In theory, we'd have to zap this state only to prevent leaking of
	 * security sensitive state (e.g., if current->mm->dumpable is zero).  However,
	 * this executes in less than 20 cycles even on Itanium, so it's not worth
	 * optimizing for it.
	 */
	mov ar.unat=0; 		mov ar.lc=0;
	mov r4=0;               mov f2=f0;              mov b1=r0
	mov r5=0;               mov f3=f0;              mov b2=r0
	mov r6=0;               mov f4=f0;              mov b3=r0
	mov r7=0;               mov f5=f0;              mov b4=r0
	ldf.fill f12=[sp];      mov f13=f0;             mov b5=r0
	ldf.fill f14=[sp];      ldf.fill f15=[sp];      mov f16=f0
	ldf.fill f17=[sp];      ldf.fill f18=[sp];      mov f19=f0
	ldf.fill f20=[sp];      ldf.fill f21=[sp];      mov f22=f0
	ldf.fill f23=[sp];      ldf.fill f24=[sp];      mov f25=f0
	ldf.fill f26=[sp];      ldf.fill f27=[sp];      mov f28=f0
	ldf.fill f29=[sp];      ldf.fill f30=[sp];      mov f31=f0
	br.ret.sptk.many rp
END(ia64_execve)

GLOBAL_ENTRY(sys_clone2)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc r16=ar.pfs,8,2,4,0
	DO_SAVE_SWITCH_STACK
	mov loc0=rp
	mov loc1=r16				// save ar.pfs across do_fork
	.body
	mov out1=in1
	mov out3=in2
	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
	mov out0=in0				// out0 = clone_flags
	br.call.sptk.many rp=do_fork
.ret1:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone2)

GLOBAL_ENTRY(sys_clone)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc r16=ar.pfs,8,2,4,0
	DO_SAVE_SWITCH_STACK
	mov loc0=rp
	mov loc1=r16				// save ar.pfs across do_fork
	.body
	mov out1=in1
	mov out3=16				// stacksize (compensates for 16-byte scratch area)
	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
	mov out0=in0				// out0 = clone_flags
	br.call.sptk.many rp=do_fork
.ret2:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone)

/*
 * prev_task <- ia64_switch_to(struct task_struct *next)
 */
GLOBAL_ENTRY(ia64_switch_to)
	.prologue
	alloc r16=ar.pfs,1,0,0,0
	DO_SAVE_SWITCH_STACK
	.body

	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
	mov r27=IA64_KR(CURRENT_STACK)
	dep r20=0,in0,61,3		// physical address of "current"
	;;
	st8 [r22]=sp			// save kernel stack pointer of old task
	shr.u r26=r20,IA64_GRANULE_SHIFT
	shr.u r17=r20,KERNEL_TR_PAGE_SHIFT
	;;
	cmp.ne p6,p7=KERNEL_TR_PAGE_NUM,r17
	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
	;;
	/*
	 * If we've already mapped this task's page, we can skip doing it again.
	 */
(p6)	cmp.eq p7,p6=r26,r27
(p6)	br.cond.dpnt .map
	;;
.done:
(p6)	ssm psr.ic			// if we had to map, re-enable the psr.ic bit FIRST!!!
	;;
(p6)	srlz.d
	ld8 sp=[r21]			// load kernel stack pointer of new task
	mov IA64_KR(CURRENT)=r20	// update "current" application register
	mov r8=r13			// return pointer to previously running task
	mov r13=in0			// set "current" pointer
	;;
	ssm psr.i			// re-enable psr.i AFTER the ic bit is serialized
	DO_LOAD_SWITCH_STACK

#ifdef CONFIG_SMP
	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
#endif
	br.ret.sptk.many rp		// boogie on out in new context

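	/*
	 * Slow path: wire the new task's stack area into the data
	 * translation register reserved for it (IA64_TR_CURRENT_STACK).
	 * Interrupts and interruption collection are turned off while the
	 * translation register is rewritten; .done above re-enables them.
	 */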
.map:
	rsm psr.i | psr.ic
	movl r25=PAGE_KERNEL
	;;
	srlz.d
	or r23=r25,r20			// construct PA | page properties
	mov r25=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r25
	mov cr.ifa=in0			// VA of next task...
	;;
	mov r25=IA64_TR_CURRENT_STACK
	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
	;;
	itr.d dtr[r25]=r23		// wire in new mapping...
	br.cond.sptk .done
END(ia64_switch_to)

/*
 * Note that interrupts are enabled during save_switch_stack and
 * load_switch_stack.  This means that we may get an interrupt with
 * "sp" pointing to the new kernel stack while ar.bspstore is still
 * pointing to the old kernel backing store area.  Since ar.rsc,
 * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts,
 * this is not a problem.  Also, we don't need to specify unwind
 * information for preserved registers that are not modified in
 * save_switch_stack as the right unwind information is already
 * specified at the call-site of save_switch_stack.
 */

/*
 * save_switch_stack:
 *	- r16 holds ar.pfs
 *	- b7 holds address to return to
 *	- rp (b0) holds return address to save
 */
GLOBAL_ENTRY(save_switch_stack)
	.prologue
	.altrp b7
	flushrs			// flush dirty regs to backing store (must be first in insn group)
	.save @priunat,r17
	mov r17=ar.unat		// preserve caller's
	.body
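	/*
	 * The switch_stack is filled through two pointer pairs (r2/r3 for
	 * the floating-point spills, r14/r15 for the integer and branch
	 * state) so that consecutive stores can issue on both memory
	 * ports; the lfetch.fault.excl instructions prefetch the frame
	 * for exclusive ownership before the stores begin.
	 */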
	adds r3=80,sp
	;;
	lfetch.fault.excl.nt1 [r3],128
	mov ar.rsc=0		// put RSE in mode: enforced lazy, little endian, pl 0
	adds r2=16+128,sp
	;;
	lfetch.fault.excl.nt1 [r2],128
	lfetch.fault.excl.nt1 [r3],128
	adds r14=SW(R4)+16,sp
	;;
	lfetch.fault.excl [r2]
	lfetch.fault.excl [r3]
	adds r15=SW(R5)+16,sp
	;;
	mov r18=ar.fpsr		// preserve fpsr
	mov r19=ar.rnat
	add r2=SW(F2)+16,sp	// r2 = &sw->f2
.mem.offset 0,0; st8.spill [r14]=r4,16		// spill r4
.mem.offset 8,0; st8.spill [r15]=r5,16		// spill r5
	add r3=SW(F3)+16,sp	// r3 = &sw->f3
	;;
	stf.spill [r2]=f2,32
	stf.spill [r3]=f3,32
	mov r21=b0
.mem.offset 0,0; st8.spill [r14]=r6,16		// spill r6
.mem.offset 8,0; st8.spill [r15]=r7,16		// spill r7
	mov r22=b1
	;;
	// since we're done with the spills, read and save ar.unat:
	mov r29=ar.unat		// M-unit
	mov r20=ar.bspstore	// M-unit
	mov r23=b2
	stf.spill [r2]=f4,32
	stf.spill [r3]=f5,32
	mov r24=b3
	;;
	st8 [r14]=r21,16	// save b0
	st8 [r15]=r22,16	// save b1
	mov r25=b4
	mov r26=b5
	;;
	st8 [r14]=r23,16	// save b2
	st8 [r15]=r24,16	// save b3
	mov r21=ar.lc		// I-unit
	stf.spill [r2]=f12,32
	stf.spill [r3]=f13,32
	;;
	st8 [r14]=r25,16	// save b4
	st8 [r15]=r26,16	// save b5
	stf.spill [r2]=f14,32
	stf.spill [r3]=f15,32
	;;
	st8 [r14]=r16		// save ar.pfs
	st8 [r15]=r21		// save ar.lc
	stf.spill [r2]=f16,32
	stf.spill [r3]=f17,32
	;;
	stf.spill [r2]=f18,32
	stf.spill [r3]=f19,32
	;;
	stf.spill [r2]=f20,32
	stf.spill [r3]=f21,32
	;;
	stf.spill [r2]=f22,32
	stf.spill [r3]=f23,32
	;;
	stf.spill [r2]=f24,32
	stf.spill [r3]=f25,32
	add r14=SW(CALLER_UNAT)+16,sp
	;;
	stf.spill [r2]=f26,32
	stf.spill [r3]=f27,32
	add r15=SW(AR_FPSR)+16,sp
	;;
	stf.spill [r2]=f28,32
	stf.spill [r3]=f29,32
	st8 [r14]=r17		// save caller_unat
	st8 [r15]=r18		// save fpsr
	mov r21=pr
	;;
	stf.spill [r2]=f30,(SW(AR_UNAT)-SW(F30))
	stf.spill [r3]=f31,(SW(AR_RNAT)-SW(F31))
	;;
	st8 [r2]=r29,16		// save ar.unat
	st8 [r3]=r19,16		// save ar.rnat
	;;
	st8 [r2]=r20		// save ar.bspstore
	st8 [r3]=r21		// save predicate registers
	mov ar.rsc=3		// put RSE back into eager mode, pl 0
	br.cond.sptk.many b7
END(save_switch_stack)

/*
 * load_switch_stack:
 *	- "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
 *	- b7 holds address to return to
 *	- must not touch r8-r11
 */
ENTRY(load_switch_stack)
	.prologue
	.altrp b7

	.body
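	/*
	 * Ordering matters below: ar.rsc must be 0 (enforced lazy mode)
	 * before ar.bspstore is written, ar.rnat must be restored after
	 * ar.bspstore, and only then may ar.rsc switch the RSE back to
	 * eager mode (see the comments at the end of this routine).
	 */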
	lfetch.fault.nt1 [sp]
	adds r2=SW(AR_BSPSTORE)+16,sp
	adds r3=SW(AR_UNAT)+16,sp
	mov ar.rsc=0						// put RSE into enforced lazy mode
	adds r14=SW(CALLER_UNAT)+16,sp
	adds r15=SW(AR_FPSR)+16,sp
	;;
	ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE))	// bspstore
	ld8 r29=[r3],(SW(B1)-SW(AR_UNAT))	// unat
	;;
	ld8 r21=[r2],16		// restore b0
	ld8 r22=[r3],16		// restore b1
	;;
	ld8 r23=[r2],16		// restore b2
	ld8 r24=[r3],16		// restore b3
	;;
	ld8 r25=[r2],16		// restore b4
	ld8 r26=[r3],16		// restore b5
	;;
	ld8 r16=[r2],(SW(PR)-SW(AR_PFS))	// ar.pfs
	ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC))	// ar.lc
	;;
	ld8 r28=[r2]		// restore pr
	ld8 r30=[r3]		// restore rnat
	;;
	ld8 r18=[r14],16	// restore caller's unat
	ld8 r19=[r15],24	// restore fpsr
	;;
	ldf.fill f2=[r14],32
	ldf.fill f3=[r15],32
	;;
	ldf.fill f4=[r14],32
	ldf.fill f5=[r15],32
	;;
	ldf.fill f12=[r14],32
	ldf.fill f13=[r15],32
	;;
	ldf.fill f14=[r14],32
	ldf.fill f15=[r15],32
	;;
	ldf.fill f16=[r14],32
	ldf.fill f17=[r15],32
	;;
	ldf.fill f18=[r14],32
	ldf.fill f19=[r15],32
	mov b0=r21
	;;
	ldf.fill f20=[r14],32
	ldf.fill f21=[r15],32
	mov b1=r22
	;;
	ldf.fill f22=[r14],32
	ldf.fill f23=[r15],32
	mov b2=r23
	;;
	mov ar.bspstore=r27
	mov ar.unat=r29		// establish unat holding the NaT bits for r4-r7
	mov b3=r24
	;;
	ldf.fill f24=[r14],32
	ldf.fill f25=[r15],32
	mov b4=r25
	;;
	ldf.fill f26=[r14],32
	ldf.fill f27=[r15],32
	mov b5=r26
	;;
	ldf.fill f28=[r14],32
	ldf.fill f29=[r15],32
	mov ar.pfs=r16
	;;
	ldf.fill f30=[r14],32
	ldf.fill f31=[r15],24
	mov ar.lc=r17
	;;
	ld8.fill r4=[r14],16
	ld8.fill r5=[r15],16
	mov pr=r28,-1
	;;
	ld8.fill r6=[r14],16
	ld8.fill r7=[r15],16

	mov ar.unat=r18				// restore caller's unat
	mov ar.rnat=r30				// must restore after bspstore but before rsc!
	mov ar.fpsr=r19				// restore fpsr
	mov ar.rsc=3				// put RSE back into eager mode, pl 0
	br.cond.sptk.many b7
END(load_switch_stack)

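/*
 * __ia64_syscall(arg0, ..., arg4, syscall#): minimal syscall stub
 * following the C library convention.  The kernel returns the result
 * in r8 and flags failure by setting r10 to -1, in which case r8
 * holds the (positive) error code: store it into errno and return -1.
 */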
GLOBAL_ENTRY(__ia64_syscall)
	.regstk 6,0,0,0
	mov r15=in5				// put syscall number in place
	break __BREAK_SYSCALL
	movl r2=errno
	cmp.eq p6,p7=-1,r10
	;;
(p6)	st4 [r2]=r8
(p6)	mov r8=-1
	br.ret.sptk.many rp
END(__ia64_syscall)

	/*
	 * We invoke syscall_trace through this intermediate function to
	 * ensure that the syscall input arguments are not clobbered.  We
	 * also use it to preserve b6, which contains the syscall entry point.
	 */
GLOBAL_ENTRY(invoke_syscall_trace)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,3,0,0
	mov loc0=rp
	.body
	mov loc2=b6
	;;
	br.call.sptk.many rp=syscall_trace
.ret3:	mov rp=loc0
	mov ar.pfs=loc1
	mov b6=loc2
	br.ret.sptk.many rp
END(invoke_syscall_trace)

	/*
	 * Invoke a system call, but do some tracing before and after the call.
	 * We MUST preserve the current register frame throughout this routine
	 * because some system calls (such as ia64_execve) directly
	 * manipulate ar.pfs.
	 *
	 * Input:
	 *	r15 = syscall number
	 *	b6  = syscall entry point
	 */
	.global ia64_strace_leave_kernel

GLOBAL_ENTRY(ia64_trace_syscall)
	PT_REGS_UNWIND_INFO(0)
{	/*
	 * Some versions of gas generate bad unwind info if the first instruction of a
	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
	 */
	nop.m 0
	nop.i 0
	br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch syscall args
}
.ret6:	br.call.sptk.many rp=b6			// do the syscall
strace_check_retval:
	cmp.lt p6,p0=r8,r0			// syscall failed?
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
	mov r10=0
(p6)	br.cond.sptk strace_error		// syscall failed ->
	;;					// avoid RAW on r10
strace_save_retval:
.mem.offset 0,0;	st8.spill [r2]=r8	// store return value in slot for r8
.mem.offset 8,0;	st8.spill [r3]=r10	// clear error indication in slot for r10
ia64_strace_leave_kernel:
	br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch return value
.rety:	br.cond.sptk ia64_leave_syscall

strace_error:
	ld8 r3=[r2]				// load pt_regs.r8
	sub r9=0,r8				// negate return value to get errno value
	;;
	cmp.ne p6,p0=r3,r0			// is pt_regs.r8!=0?
	adds r3=16,r2				// r3=&pt_regs.r10
	;;
(p6)	mov r10=-1
(p6)	mov r8=r9
	br.cond.sptk strace_save_retval
END(ia64_trace_syscall)

GLOBAL_ENTRY(ia64_ret_from_clone)
	PT_REGS_UNWIND_INFO(0)
{	/*
	 * Some versions of gas generate bad unwind info if the first instruction of a
	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
	 */
	nop.m 0
	nop.i 0
	/*
	 * We need to call schedule_tail() to complete the scheduling process.
	 * Called by ia64_switch_to after do_fork()->copy_thread().  r8 contains the
	 * address of the previously executing task.
	 */
	br.call.sptk.many rp=ia64_invoke_schedule_tail
}
.ret8:
	adds r2=IA64_TASK_PTRACE_OFFSET,r13
	;;
	ld8 r2=[r2]
	;;
	mov r8=0
	tbit.nz p6,p0=r2,PT_TRACESYS_BIT
(p6)	br.cond.spnt strace_check_retval
	;;					// added stop bits to prevent r8 dependency
END(ia64_ret_from_clone)
	// fall through
GLOBAL_ENTRY(ia64_ret_from_syscall)
	PT_REGS_UNWIND_INFO(0)
	cmp.ge p6,p7=r8,r0			// syscall executed successfully?
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
	;;
	.mem.offset 0,0
(p6)	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
	.mem.offset 8,0
(p6)	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
(p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
END(ia64_ret_from_syscall)
	// fall through
/*
 * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
 *	need to switch to bank 0 and doesn't restore the scratch registers.
 *	To avoid leaking kernel bits, the scratch registers are set to
 *	the following known-to-be-safe values:
 *
 *		  r1: restored (global pointer)
 *		  r2: cleared
 *		  r3: cleared
 *	      r8-r11: restored (syscall return value(s))
 *		 r12: restored (user-level stack pointer)
 *		 r13: restored (user-level thread pointer)
 *		 r14: cleared
 *		 r15: restored (syscall #)
 *	     r16-r19: cleared
 *		 r20: user-level ar.fpsr
 *		 r21: user-level b0
 *		 r22: cleared
 *		 r23: user-level ar.bspstore
 *		 r24: user-level ar.rnat
 *		 r25: user-level ar.unat
 *		 r26: user-level ar.pfs
 *		 r27: user-level ar.rsc
 *		 r28: user-level ip
 *		 r29: user-level psr
 *		 r30: user-level cfm
 *		 r31: user-level pr
 *	      f6-f11: cleared
 *		  pr: restored (user-level pr)
 *		  b0: restored (user-level rp)
 *	          b6: cleared
 *		  b7: cleared
 *	     ar.unat: restored (user-level ar.unat)
 *	      ar.pfs: restored (user-level ar.pfs)
 *	      ar.rsc: restored (user-level ar.rsc)
 *	     ar.rnat: restored (user-level ar.rnat)
 *	 ar.bspstore: restored (user-level ar.bspstore)
 *	     ar.fpsr: restored (user-level ar.fpsr)
 *	      ar.ccv: cleared
 *	      ar.csd: cleared
 *	      ar.ssd: cleared
 */
GLOBAL_ENTRY(ia64_leave_syscall)
	PT_REGS_UNWIND_INFO(0)
	lfetch.fault [sp]
	movl r14=.restart1
	;;
	mov.ret.sptk rp=r14,.restart1
	cmp.eq pLvSys,p0=r0,r0			// pLvSys=1: leave from syscall
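	/*
	 * Note the linkage trick: the calls made through b7 below leave
	 * rp pointing at .restart1, so when the callee returns via
	 * br.ret rp, control comes back here and the need_resched/
	 * sigpending test is repeated.
	 */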
.restart1:
	// need_resched and signals atomic test
(pUser)	rsm psr.i
	adds r17=IA64_TASK_NEED_RESCHED_OFFSET,r13
	adds r18=IA64_TASK_SIGPENDING_OFFSET,r13
#ifdef CONFIG_PERFMON
	adds r19=IA64_TASK_PFM_OVFL_BLOCK_RESET_OFFSET,r13
#endif
	;;
#ifdef CONFIG_PERFMON
(pUser)	ld8 r19=[r19]				// load current->thread.pfm_ovfl_block_reset
#endif
(pUser)	ld8 r17=[r17]				// load current->need_resched
(pUser)	ld4 r18=[r18]				// load current->sigpending
	;;
#ifdef CONFIG_PERFMON
(pUser)	cmp.ne.unc p9,p0=r19,r0			// current->thread.pfm_ovfl_block_reset != 0?
#endif
(pUser)	cmp.ne.unc p7,p0=r17,r0			// current->need_resched != 0?
(pUser)	cmp.ne.unc p8,p0=r18,r0			// current->sigpending != 0?
	;;
#ifdef CONFIG_PERFMON
(p9)	br.call.spnt.many b7=pfm_ovfl_block_reset
#endif
#if __GNUC__ < 3
(p7)	br.call.spnt.many b7=invoke_schedule
#else
(p7)	br.call.spnt.many b7=schedule
#endif
(p8)	br.call.spnt.many rp=handle_signal_delivery	// check & deliver pending signals (once)

	mov  ar.csd=r0
	mov  ar.ssd=r0
	adds r16=PT(LOADRS)+16,r12
	adds r17=PT(AR_BSPSTORE)+16,r12
	mov  f6=f0		// clear f6
	;;
	ld8 r19=[r16],PT(R8)-PT(LOADRS)		// load ar.rsc value for "loadrs"
	ld8 r23=[r17],PT(R9)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
	mov r22=r0		// clear r22
	;;
	// start restoring the state saved on the kernel stack (struct pt_regs):
	ld8.fill r8=[r16],16
	ld8.fill r9=[r17],16
	mov  f7=f0		// clear f7
	;;
	ld8.fill r10=[r16],16
	ld8.fill r11=[r17],16
	mov  f8=f0		// clear f8
	;;
	ld8 r29=[r16],16	// load cr.ipsr
	ld8 r28=[r17],16	// load cr.iip
	mov b7=r0		// clear b7
	;;
	ld8 r30=[r16],16	// load cr.ifs
	ld8 r25=[r17],16	// load ar.unat
	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
	;;
	rsm psr.i | psr.ic	// initiate turning off of interrupt and interruption collection
	invala			// invalidate ALAT
	mov  f9=f0		// clear f9
	;;
	ld8 r26=[r16],16	// load ar.pfs
	ld8 r27=[r17],PT(PR)-PT(AR_RSC)		// load ar.rsc
	mov  f10=f0		// clear f10
	;;
	ld8 r24=[r16],PT(B0)-PT(AR_RNAT)	// load ar.rnat (may be garbage)
	ld8 r31=[r17],PT(R1)-PT(PR)		// load predicates
	mov  f11=f0		// clear f11
	;;
	ld8 r21=[r16],PT(R12)-PT(B0)		// load b0
	ld8.fill r1=[r17],16	// load r1
	mov r3=r0		// clear r3
	;;
	ld8.fill r12=[r16],16
	ld8.fill r13=[r17],16
	mov r2=r0		// clear r2
	;;
	ld8 r20=[r16]		// ar.fpsr
	ld8.fill r15=[r17]	// load r15
	adds r18=16,r16
	;;
	mov r16=ar.bsp		// get existing backing store pointer
	movl r17=PERCPU_ADDR+IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET
	srlz.i			// ensure interruption collection is off
	mov ar.ccv=r0		// clear ar.ccv
	mov b6=r0		// clear b6
	;;
	ld4 r17=[r17]		// r17 = cpu_data->phys_stacked_size_p8
	mov r14=r0		// clear r14
(pKern)	br.cond.dpnt skip_rbs_switch
	/*
	 * Restore user backing store.
	 *
	 * NOTE: alloc, loadrs, and cover can't be predicated.
	 */
	cover				// add current frame into dirty partition
	shr.u r18=r19,16	// get byte size of existing "dirty" partition
	;;
	mov r19=ar.bsp			// get new backing store pointer
	sub r16=r16,r18			// krbs = old bsp - size of dirty partition
	cmp.ne p9,p0=r0,r0		// clear p9 to skip restore of cr.ifs
	;;
	sub r19=r19,r16			// calculate total byte size of dirty partition
	add r18=64,r18			// don't force in0-in7 into memory...
	;;
	shl r19=r19,16			// shift size of dirty partition into loadrs position
	br.few dont_preserve_current_frame
	;;
END(ia64_leave_syscall)

GLOBAL_ENTRY(ia64_ret_from_execve_syscall)
	PT_REGS_UNWIND_INFO(0)
	cmp.ge p6,p7=r8,r0			// syscall executed successfully?
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
	;;
	.mem.offset 0,0
(p6)	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
	.mem.offset 8,0
(p6)	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
(p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
END(ia64_ret_from_execve_syscall)
	// fall through
GLOBAL_ENTRY(ia64_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
	lfetch.fault [sp]
	movl r14=.restart
	;;
	mov.ret.sptk rp=r14,.restart
	cmp.eq p0,pLvSys=r0,r0			// pLvSys=0: leave from kernel
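	// same rp=.restart linkage trick as in ia64_leave_syscall above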
.restart:
	// need_resched and signals atomic test
(pUser)	rsm psr.i
	adds r17=IA64_TASK_NEED_RESCHED_OFFSET,r13
	adds r18=IA64_TASK_SIGPENDING_OFFSET,r13
#ifdef CONFIG_PERFMON
	adds r19=IA64_TASK_PFM_OVFL_BLOCK_RESET_OFFSET,r13
#endif
	;;
#ifdef CONFIG_PERFMON
(pUser)	ld8 r19=[r19]				// load current->thread.pfm_ovfl_block_reset
#endif
(pUser)	ld8 r17=[r17]				// load current->need_resched
(pUser)	ld4 r18=[r18]				// load current->sigpending
	;;
#ifdef CONFIG_PERFMON
(pUser)	cmp.ne.unc p9,p0=r19,r0			// current->thread.pfm_ovfl_block_reset != 0?
#endif
(pUser)	cmp.ne.unc p7,p0=r17,r0			// current->need_resched != 0?
(pUser)	cmp.ne.unc p8,p0=r18,r0			// current->sigpending != 0?
	;;
#ifdef CONFIG_PERFMON
(p9)	br.call.spnt.many b7=pfm_ovfl_block_reset
#endif
#if __GNUC__ < 3
(p7)	br.call.spnt.many b7=invoke_schedule
#else
(p7)	br.call.spnt.many b7=schedule
#endif
(p8)	br.call.spnt.many rp=handle_signal_delivery	// check & deliver pending signals (once)

	adds r20=PT(CR_IPSR)+16,r12
	adds r21=PT(PR)+16,r12
	;;
	lfetch.fault.excl [r20]
	lfetch.fault.excl [r21]
	adds r2=PT(B6)+16,r12
	adds r3=PT(R16)+16,r12
	mov r29=PT(R24)-PT(B6)
	mov r30=PT(B7)-PT(R24)
	;;
	// start restoring the state saved on the kernel stack (struct pt_regs):
	ld8 r28=[r2],r29	// b6
	ld8.fill r16=[r3],128
	mov r31=PT(AR_CSD)-PT(AR_CCV)
	;;
	ld8.fill r24=[r2],r30
	ld8 r15=[r3],r31
	;;
	ld8 r29=[r2],16		// b7
	ld8 r30=[r3],16		// ar.csd
	;;
	ld8 r31=[r2],16		// ar.ssd
	ld8.fill r8=[r3],16
	;;
	ld8.fill r9=[r2],16
	ld8.fill r10=[r3],PT(R17)-PT(R10)
	;;
	ld8.fill r11=[r2],PT(R18)-PT(R11)
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	mov ar.csd=r30
	mov ar.ssd=r31
	;;
	rsm psr.i | psr.ic	// initiate turning off of interrupt and interruption collection
	invala			// invalidate ALAT
	;;
	ld8.fill r22=[r2],24
	ld8.fill r23=[r3],24
	mov b6=r28
	;;
	ld8.fill r25=[r2],16
	ld8.fill r26=[r3],16
	mov b7=r29
	;;
	ld8.fill r27=[r2],16
	ld8.fill r28=[r3],16
	;;
	ld8.fill r29=[r2],16
	ld8.fill r30=[r3],24
	;;
	ld8.fill r31=[r2],32
	ldf.fill f6=[r3],32
	;;
	ldf.fill f7=[r2],32
	ldf.fill f8=[r3],32
	;;
	srlz.i			// ensure interruption collection is off
	mov ar.ccv=r15
	;;
	ldf.fill f9=[r2],32
	ldf.fill f10=[r3],32
	bsw.0			// switch back to bank 0
	;;
	ldf.fill f11=[r2]
	adds r16=PT(CR_IPSR)+16,r12
	adds r17=PT(CR_IIP)+16,r12
	;;
	ld8 r29=[r16],16	// load cr.ipsr
	ld8 r28=[r17],16	// load cr.iip
	;;
	ld8 r30=[r16],16	// load cr.ifs
	ld8 r25=[r17],16	// load ar.unat
	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
	;;
	ld8 r26=[r16],16	// load ar.pfs
	ld8 r27=[r17],16	// load ar.rsc
	;;
	ld8 r24=[r16],16	// load ar.rnat (may be garbage)
	ld8 r23=[r17],16	// load ar.bspstore (may be garbage)
	;;
	ld8 r31=[r16],16	// load predicates
	ld8 r21=[r17],16	// load b0
	;;
	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
	ld8.fill r1=[r17],16	// load r1
	;;
	ld8.fill r12=[r16],16
	ld8.fill r13=[r17],16
	;;
	ld8 r20=[r16],16
	ld8.fill r15=[r17],16
	;;
	ld8.fill r14=[r16]
	ld8.fill r2=[r17],16
	adds r18=16,r16
	;;
	mov r16=ar.bsp		// get existing backing store pointer
	movl r17=PERCPU_ADDR+IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET
	;;
	ld8.fill r3=[r18]
	ld4 r17=[r17]		// r17 = cpu_data->phys_stacked_size_p8
	shr.u r18=r19,16	// get byte size of existing "dirty" partition
(pKern)	br.cond.dpnt skip_rbs_switch
	/*
	 * Restore user backing store.
	 *
	 * NOTE: alloc, loadrs, and cover can't be predicated.
	 */
(pNonSys) br.cond.dpnt dont_preserve_current_frame
	cover				// add current frame into dirty partition and set cr.ifs
	;;
	mov r19=ar.bsp			// get new backing store pointer
	sub r16=r16,r18			// krbs = old bsp - size of dirty partition
	cmp.ne p9,p0=r0,r0		// clear p9 to skip restore of cr.ifs
	;;
	sub r19=r19,r16			// calculate total byte size of dirty partition
	add r18=64,r18			// don't force in0-in7 into memory...
	;;
	shl r19=r19,16			// shift size of dirty partition into loadrs position
	;;
dont_preserve_current_frame:
	/*
	 * To prevent leaking bits between the kernel and user-space,
	 * we must clear the stacked registers in the "invalid" partition here.
	 * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
	 * 5 registers/cycle on McKinley).
	 */
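	/*
	 * rse_clear_invalid works by recursion: each step allocates a
	 * fresh frame of Nregs stacked registers and zeroes its locals,
	 * recursing while more than Nregs*8 bytes remain (in0 = bytes
	 * still to clear, in1 = recursion depth), then unwinds via br.ret.
	 */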
#	define pRecurse	p6
#	define pReturn	p7
#ifdef CONFIG_ITANIUM
#	define Nregs	10
#else
#	define Nregs	14
#endif
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	shr.u loc1=r18,9		// RNaTslots <= floor(dirtySize / (64*8))
	sub r17=r17,r18			// r17 = (physStackedSize + 8) - dirtySize
	;;
	mov ar.rsc=r19			// load ar.rsc to be used for "loadrs"
	shladd in0=loc1,3,r17
	mov in1=0
	;;
rse_clear_invalid:
#ifdef CONFIG_ITANIUM
	// cycle 0
 { .mii
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
}{ .mfb
	add out1=1,in1			// increment recursion count
	nop.f 0
	nop.b 0				// can't do br.call here because of alloc (WAW on CFM)
	;;
}{ .mfi	// cycle 1
	mov loc1=0
	nop.f 0
	mov loc2=0
}{ .mib
	mov loc3=0
	mov loc4=0
(pRecurse) br.call.sptk.many b0=rse_clear_invalid

}{ .mfi	// cycle 2
	mov loc5=0
	nop.f 0
	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
}{ .mib
	mov loc6=0
	mov loc7=0
(pReturn) br.ret.sptk.many b0
}
#else /* !CONFIG_ITANIUM */
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
	add out1=1,in1			// increment recursion count
	mov loc1=0
	mov loc2=0
	;;
	mov loc3=0
	mov loc4=0
	mov loc5=0
	mov loc6=0
	mov loc7=0
(pRecurse) br.call.sptk.many b0=rse_clear_invalid
	;;
	mov loc8=0
	mov loc9=0
	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
	mov loc10=0
	mov loc11=0
(pReturn) br.ret.sptk.many b0
#endif /* !CONFIG_ITANIUM */
#	undef pRecurse
#	undef pReturn
	;;
	alloc r17=ar.pfs,0,0,0,0	// drop current register frame
	;;
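	/*
	 * loadrs reloads the user's stacked registers from the kernel
	 * backing store, according to the value planted in the loadrs
	 * field of ar.rsc above (the size of the dirty partition in r19).
	 */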
	loadrs
	;;
skip_rbs_switch:
(pLvSys)mov r19=r0		// clear r19 for leave_syscall, no-op otherwise
	mov b0=r21
	mov ar.pfs=r26
(pUser)	mov ar.bspstore=r23
(p9)	mov cr.ifs=r30
(pLvSys)mov r16=r0		// clear r16 for leave_syscall, no-op otherwise
	mov cr.ipsr=r29
	mov ar.fpsr=r20
(pLvSys)mov r17=r0		// clear r17 for leave_syscall, no-op otherwise
	mov cr.iip=r28
	;;
(pUser)	mov ar.rnat=r24		// must happen with RSE in lazy mode
(pLvSys)mov r18=r0		// clear r18 for leave_syscall, no-op otherwise
	mov ar.rsc=r27
	mov ar.unat=r25
	mov pr=r31,-1
	rfi
END(ia64_leave_kernel)

ENTRY(handle_syscall_error)
	/*
	 * Some system calls (e.g., ptrace, mmap) can return arbitrary
	 * values which could lead us to mistake a negative return
	 * value for a failed syscall.  Those syscalls must deposit
	 * a non-zero value in pt_regs.r8 to indicate an error.
	 * If pt_regs.r8 is zero, we assume that the call completed
	 * successfully.
	 */
	PT_REGS_UNWIND_INFO(0)
	ld8 r3=[r2]		// load pt_regs.r8
	sub r9=0,r8		// negate return value to get errno
	;;
	mov r10=-1		// return -1 in pt_regs.r10 to indicate error
	cmp.eq p6,p7=r3,r0	// is pt_regs.r8==0?
	adds r3=16,r2		// r3=&pt_regs.r10
	;;
(p6)	mov r9=r8
(p6)	mov r10=0
	;;
.mem.offset 0,0; st8.spill [r2]=r9	// store errno in pt_regs.r8 and set unat bit
.mem.offset 8,0; st8.spill [r3]=r10	// store error indication in pt_regs.r10 and set unat bit
	br.cond.sptk ia64_leave_syscall
END(handle_syscall_error)

	/*
	 * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
	 * in case a system call gets restarted.
	 */
GLOBAL_ENTRY(ia64_invoke_schedule_tail)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,1,0
	mov loc0=rp
	mov out0=r8				// Address of previous task
	;;
	br.call.sptk.many rp=schedule_tail
.ret11:	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(ia64_invoke_schedule_tail)

#if __GNUC__ < 3

	/*
	 * Invoke schedule() while preserving in0-in7, which may be needed
	 * in case a system call gets restarted.  Note that declaring schedule()
	 * with asmlinkage() is NOT enough because that will only preserve as many
	 * registers as there are formal arguments.
	 *
	 * XXX fix me: with gcc 3.0, we won't need this anymore because syscall_linkage
	 *	renders all eight input registers (in0-in7) as "untouchable".
	 */
ENTRY(invoke_schedule)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,0,0
	mov loc0=rp
	;;
	.body
	br.call.sptk.many rp=schedule
.ret14:	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(invoke_schedule)

#endif /* __GNUC__ < 3 */

	/*
	 * Set up the stack and call ia64_do_signal.  Note that pSys and pNonSys need to
	 * be set up by the caller.  We declare 8 input registers so the system call
	 * args get preserved, in case we need to restart a system call.
	 */
ENTRY(handle_signal_delivery)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
	mov r9=ar.unat
	mov loc0=rp				// save return address
	mov out0=0				// there is no "oldset"
	adds out1=8,sp				// out1=&sigscratch->ar_pfs
(pSys)	mov out2=1				// out2==1 => we're in a syscall
	;;
(pNonSys) mov out2=0				// out2==0 => not a syscall
	.fframe 16
	.spillpsp ar.unat, 16			// (note that offset is relative to psp+0x10!)
	st8 [sp]=r9,-16				// allocate space for ar.unat and save it
	st8 [out1]=loc1,-8			// save ar.pfs, out1=&sigscratch
	.body
	br.call.sptk.many rp=ia64_do_signal
.ret15:	.restore sp
	adds sp=16,sp				// pop scratch stack space
	;;
	ld8 r9=[sp]				// load new unat from sw->caller_unat
	mov rp=loc0
	;;
	mov ar.unat=r9
	mov ar.pfs=loc1
	br.ret.sptk.many rp
END(handle_signal_delivery)

GLOBAL_ENTRY(sys_rt_sigsuspend)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
	mov r9=ar.unat
	mov loc0=rp				// save return address
	mov out0=in0				// mask
	mov out1=in1				// sigsetsize
	adds out2=8,sp				// out2=&sigscratch->ar_pfs
	;;
	.fframe 16
	.spillpsp ar.unat, 16			// (note that offset is relative to psp+0x10!)
	st8 [sp]=r9,-16				// allocate space for ar.unat and save it
	st8 [out2]=loc1,-8			// save ar.pfs, out2=&sigscratch
	.body
	br.call.sptk.many rp=ia64_rt_sigsuspend
.ret17:	.restore sp
	adds sp=16,sp				// pop scratch stack space
	;;
	ld8 r9=[sp]				// load new unat from sw->caller_unat
	mov rp=loc0
	;;
	mov ar.unat=r9
	mov ar.pfs=loc1
	br.ret.sptk.many rp
END(sys_rt_sigsuspend)

ENTRY(sys_rt_sigreturn)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	alloc r2=ar.pfs,8,0,1,0
	.prologue
	PT_REGS_SAVES(16)
	adds sp=-16,sp
	.body
	cmp.eq pNonSys,pSys=r0,r0		// sigreturn isn't a normal syscall...
	;;
	/*
	 * After a signal handler runs, the live registers f6-f11 are restored
	 * to the values of the previously executing context for synchronous
	 * signals (from exceptions), or cleared to 0 for asynchronous signals
	 * (from syscalls).  These live registers are put into pt_regs before
	 * returning to user space.
	 */
	adds r16=PT(F6)+32,sp
	adds r17=PT(F7)+32,sp
	;;
	stf.spill [r16]=f6,32
	stf.spill [r17]=f7,32
	;;
	stf.spill [r16]=f8,32
	stf.spill [r17]=f9,32
	;;
	stf.spill [r16]=f10
	stf.spill [r17]=f11
	adds out0=16,sp				// out0 = &sigscratch
	br.call.sptk.many rp=ia64_rt_sigreturn
.ret19:	.restore sp 0
	adds sp=16,sp
	;;
	ld8 r9=[sp]				// load new ar.unat
	mov.sptk b7=r8,ia64_leave_kernel
	;;
	mov ar.unat=r9
	br.many b7
END(sys_rt_sigreturn)

GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
	.prologue
	/*
	 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
	 */
	mov r16=r0
	DO_SAVE_SWITCH_STACK
	br.call.sptk.many rp=ia64_handle_unaligned	// stack frame setup in ivt
.ret21:	.body
	DO_LOAD_SWITCH_STACK
	br.cond.sptk.many rp				// goes to ia64_leave_kernel
END(ia64_prepare_handle_unaligned)

	//
	// unw_init_running(void (*callback)(info, arg), void *arg)
	//
#	define EXTRA_FRAME_SIZE	((UNW_FRAME_INFO_SIZE+15)&~15)

GLOBAL_ENTRY(unw_init_running)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	alloc loc1=ar.pfs,2,3,3,0
	;;
	ld8 loc2=[in0],8
	mov loc0=rp
	mov r16=loc1
	DO_SAVE_SWITCH_STACK
	.body

	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	.fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
	SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
	adds sp=-EXTRA_FRAME_SIZE,sp
	.body
	;;
	adds out0=16,sp				// &info
	mov out1=r13				// current
	adds out2=16+EXTRA_FRAME_SIZE,sp	// &switch_stack
	br.call.sptk.many rp=unw_init_frame_info
1:	adds out0=16,sp				// &info
	mov b6=loc2
	mov loc2=gp				// save gp across indirect function call
	;;
	ld8 gp=[in0]
	mov out1=in1				// arg
	br.call.sptk.many rp=b6			// invoke the callback function
1:	mov gp=loc2				// restore gp

	// For now, we don't allow changing registers from within
	// unw_init_running; if we ever want to allow that, we'd
	// have to do a load_switch_stack here:
	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp

	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(unw_init_running)

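/*
 * ia64 syscall numbers start at 1024: entry 0 below is syscall 1024
 * (sys_ni_syscall) and sys_exit is 1025, matching the numbers in the
 * comments.  The break handler in ivt.S subtracts this base before
 * indexing the table.
 */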
	.rodata
	.align 8
	.globl sys_call_table
sys_call_table:
	data8 sys_ni_syscall		//  This must be sys_ni_syscall!  See ivt.S.
	data8 sys_exit				// 1025
	data8 sys_read
	data8 sys_write
	data8 sys_open
	data8 sys_close
	data8 sys_creat				// 1030
	data8 sys_link
	data8 sys_unlink
	data8 ia64_execve
	data8 sys_chdir
	data8 sys_fchdir			// 1035
	data8 sys_utimes
	data8 sys_mknod
	data8 sys_chmod
	data8 sys_chown
	data8 sys_lseek				// 1040
	data8 sys_getpid
	data8 sys_getppid
	data8 sys_mount
	data8 sys_umount
	data8 sys_setuid			// 1045
	data8 sys_getuid
	data8 sys_geteuid
	data8 sys_ptrace
	data8 sys_access
	data8 sys_sync				// 1050
	data8 sys_fsync
	data8 sys_fdatasync
	data8 sys_kill
	data8 sys_rename
	data8 sys_mkdir				// 1055
	data8 sys_rmdir
	data8 sys_dup
	data8 sys_pipe
	data8 sys_times
	data8 ia64_brk				// 1060
	data8 sys_setgid
	data8 sys_getgid
	data8 sys_getegid
	data8 sys_acct
	data8 sys_ioctl				// 1065
	data8 sys_fcntl
	data8 sys_umask
	data8 sys_chroot
	data8 sys_ustat
	data8 sys_dup2				// 1070
	data8 sys_setreuid
	data8 sys_setregid
	data8 sys_getresuid
	data8 sys_setresuid
	data8 sys_getresgid			// 1075
	data8 sys_setresgid
	data8 sys_getgroups
	data8 sys_setgroups
	data8 sys_getpgid
	data8 sys_setpgid			// 1080
	data8 sys_setsid
	data8 sys_getsid
	data8 sys_sethostname
	data8 sys_setrlimit
	data8 sys_getrlimit			// 1085
	data8 sys_getrusage
	data8 sys_gettimeofday
	data8 sys_settimeofday
	data8 sys_select
	data8 sys_poll				// 1090
	data8 sys_symlink
	data8 sys_readlink
	data8 sys_uselib
	data8 sys_swapon
	data8 sys_swapoff			// 1095
	data8 sys_reboot
	data8 sys_truncate
	data8 sys_ftruncate
	data8 sys_fchmod
	data8 sys_fchown			// 1100
	data8 ia64_getpriority
	data8 sys_setpriority
	data8 sys_statfs
	data8 sys_fstatfs
	data8 sys_gettid			// 1105
	data8 sys_semget
	data8 sys_semop
	data8 sys_semctl
	data8 sys_msgget
	data8 sys_msgsnd			// 1110
	data8 sys_msgrcv
	data8 sys_msgctl
	data8 sys_shmget
	data8 ia64_shmat
	data8 sys_shmdt				// 1115
	data8 sys_shmctl
	data8 sys_syslog
	data8 sys_setitimer
	data8 sys_getitimer
	data8 ia64_oldstat			// 1120
	data8 ia64_oldlstat
	data8 ia64_oldfstat
	data8 sys_vhangup
	data8 sys_lchown
	data8 sys_vm86				// 1125
	data8 sys_wait4
	data8 sys_sysinfo
	data8 sys_clone
	data8 sys_setdomainname
	data8 sys_newuname			// 1130
	data8 sys_adjtimex
	data8 ia64_create_module
	data8 sys_init_module
	data8 sys_delete_module
	data8 sys_get_kernel_syms		// 1135
	data8 sys_query_module
	data8 sys_quotactl
	data8 sys_bdflush
	data8 sys_sysfs
	data8 sys_personality			// 1140
	data8 ia64_ni_syscall		// sys_afs_syscall
	data8 sys_setfsuid
	data8 sys_setfsgid
	data8 sys_getdents
	data8 sys_flock				// 1145
	data8 sys_readv
	data8 sys_writev
	data8 sys_pread
	data8 sys_pwrite
	data8 sys_sysctl			// 1150
	data8 sys_mmap
	data8 sys_munmap
	data8 sys_mlock
	data8 sys_mlockall
	data8 sys_mprotect			// 1155
	data8 ia64_mremap
	data8 sys_msync
	data8 sys_munlock
	data8 sys_munlockall
	data8 sys_sched_getparam		// 1160
	data8 sys_sched_setparam
	data8 sys_sched_getscheduler
	data8 sys_sched_setscheduler
	data8 sys_sched_yield
	data8 sys_sched_get_priority_max	// 1165
	data8 sys_sched_get_priority_min
	data8 sys_sched_rr_get_interval
	data8 sys_nanosleep
	data8 sys_nfsservctl
	data8 sys_prctl				// 1170
	data8 sys_getpagesize
	data8 sys_mmap2
	data8 sys_pciconfig_read
	data8 sys_pciconfig_write
	data8 sys_perfmonctl			// 1175
	data8 sys_sigaltstack
	data8 sys_rt_sigaction
	data8 sys_rt_sigpending
	data8 sys_rt_sigprocmask
	data8 sys_rt_sigqueueinfo		// 1180
	data8 sys_rt_sigreturn
	data8 sys_rt_sigsuspend
	data8 sys_rt_sigtimedwait
	data8 sys_getcwd
	data8 sys_capget			// 1185
	data8 sys_capset
	data8 sys_sendfile
	data8 sys_ni_syscall		// sys_getpmsg (STREAMS)
	data8 sys_ni_syscall		// sys_putpmsg (STREAMS)
	data8 sys_socket			// 1190
	data8 sys_bind
	data8 sys_connect
	data8 sys_listen
	data8 sys_accept
	data8 sys_getsockname			// 1195
	data8 sys_getpeername
	data8 sys_socketpair
	data8 sys_send
	data8 sys_sendto
	data8 sys_recv				// 1200
	data8 sys_recvfrom
	data8 sys_shutdown
	data8 sys_setsockopt
	data8 sys_getsockopt
	data8 sys_sendmsg			// 1205
	data8 sys_recvmsg
	data8 sys_pivot_root
	data8 sys_mincore
	data8 sys_madvise
	data8 sys_newstat			// 1210
	data8 sys_newlstat
	data8 sys_newfstat
	data8 sys_clone2
	data8 sys_getdents64
	data8 sys_getunwind			// 1215
	data8 sys_readahead
	data8 sys_setxattr
	data8 sys_lsetxattr
	data8 sys_fsetxattr
	data8 sys_getxattr			// 1220
	data8 sys_lgetxattr
	data8 sys_fgetxattr
	data8 sys_listxattr
	data8 sys_llistxattr
	data8 sys_flistxattr			// 1225
	data8 sys_removexattr
	data8 sys_lremovexattr
	data8 sys_fremovexattr
	data8 sys_tkill
	data8 ia64_ni_syscall			// 1230
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall			// 1235
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall			// 1240
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall			// 1245
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall			// 1250
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall			// 1255
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall			// 1260
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall			// 1265
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall			// 1270
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall			// 1275
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall
	data8 ia64_ni_syscall