1/*
2 * arch/ia64/kernel/ivt.S
3 *
4 * Copyright (C) 2002-2003 Intel Co
5 *      Suresh Siddha <suresh.b.siddha@intel.com>
6 *      Kenneth Chen <kenneth.w.chen@intel.com>
7 *      Fenghua Yu <fenghua.yu@intel.com>
8 * Copyright (C) 1998-2001 Hewlett-Packard Co
9 *	Stephane Eranian <eranian@hpl.hp.com>
10 *	David Mosberger <davidm@hpl.hp.com>
11 *
12 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
13 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
14 */
15/*
16 * This file defines the interruption vector table used by the CPU.
17 * It does not include one entry per possible cause of interruption.
18 *
19 * The first 20 entries of the table contain 64 bundles each while the
20 * remaining 48 entries contain only 16 bundles each.
21 *
22 * The 64 bundles are used to allow inlining the whole handler for critical
23 * interruptions like TLB misses.
24 *
25 *  For each entry, the comment is as follows:
26 *
27 *		// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
28 *  entry offset ----/     /         /                  /          /
29 *  entry number ---------/         /                  /          /
30 *  size of the entry -------------/                  /          /
31 *  vector name -------------------------------------/          /
32 *  interruptions triggering this vector ----------------------/
33 *
34 * The table is 32KB in size and must be aligned on 32KB boundary.
35 * (The CPU ignores the 15 lower bits of the address)
36 *
37 * Table is based upon EAS2.6 (Oct 1999)
38 */
39
40#include <linux/config.h>
41
42#include <asm/asmmacro.h>
43#include <asm/break.h>
44#include <asm/kregs.h>
45#include <asm/offsets.h>
46#include <asm/pgtable.h>
47#include <asm/processor.h>
48#include <asm/ptrace.h>
49#include <asm/system.h>
50#include <asm/unistd.h>
51#include <asm/errno.h>
52
/*
 * Extra PSR bits to turn back on together with interruption collection
 * ("ssm psr.ic | PSR_DEFAULT_BITS" throughout this file).  The enabled
 * arm keeps unaligned-access checking (psr.ac) on inside the kernel;
 * flip to the "#else" arm to run with no extra PSR bits.
 */
#if 1
# define PSR_DEFAULT_BITS	psr.ac
#else
# define PSR_DEFAULT_BITS	0
#endif
58
#if 0
  /*
   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
   * needed for something else before enabling this...
   *
   * Each fault shifts the history in ar.k2 left by 8 bits and records the
   * vector number (i) in the low byte, so ar.k2 holds the last 8 vectors.
   * Disabled by default; the production DBG_FAULT() expands to nothing.
   */
# define DBG_FAULT(i)	mov r16=ar.k2;;	shl r16=r16,8;;	add r16=(i),r16;;mov ar.k2=r16
#else
# define DBG_FAULT(i)
#endif
68
69#define MINSTATE_VIRT	/* needed by minstate.h */
70#include "minstate.h"
71
/*
 * Common stub for vectors without an inline handler: save the predicate
 * registers in r31, load the vector number into r19, and branch to the
 * generic fault dispatcher.
 */
#define FAULT(n)									\
	mov r31=pr;									\
	mov r19=n;;			/* prepare to save predicates */		\
	br.sptk.many dispatch_to_fault_handler
76
77/*
78 * As we don't (hopefully) use the space available, we need to fill it with
79 * nops. the parameter may be used for debugging and is representing the entry
80 * number
81 */
#define BREAK_BUNDLE(a)	break.m (a); \
				break.i (a); \
				break.i (a)
/*
 * 4 breaks bundles all together
 *
 * NOTE(review): the ";" immediately after "(a)" below becomes the first
 * token of the macro expansion rather than ending the definition.  It is
 * harmless (an empty statement) and these filler macros appear unused in
 * this file, but confirm before relying on them.
 */
#define BREAK_BUNDLE4(a); BREAK_BUNDLE(a); BREAK_BUNDLE(a); BREAK_BUNDLE(a); BREAK_BUNDLE(a)

/*
 * 8 bundles all together (too lazy to use only 4 at a time !)
 */
#define BREAK_BUNDLE8(a); BREAK_BUNDLE4(a); BREAK_BUNDLE4(a)
94
95	.section .text.ivt,"ax"
96
97	.align 32768	// align on 32KB boundary
98	.global ia64_ivt
99ia64_ivt:
100/////////////////////////////////////////////////////////////////////////////////////////
101// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vhpt_miss)
	DBG_FAULT(0)
	/*
	 * The VHPT vector is invoked when the TLB entry for the virtual page table
	 * is missing.  This happens only as a result of a previous
	 * (the "original") TLB miss, which may either be caused by an instruction
	 * fetch or a data access (or non-access).
	 *
	 * What we do here is normal TLB miss handling for the _original_ miss, followed
	 * by inserting the TLB entry for the virtual page table page that the VHPT
	 * walker was attempting to access.  The latter gets inserted as long
	 * as both L1 and L2 have valid mappings for the faulting address.
	 * The TLB entry for the original miss gets inserted only if
	 * the L3 entry indicates that the page is present.
	 *
	 * do_page_fault gets invoked in the following cases:
	 *	- the faulting virtual address uses unimplemented address bits
	 *	- the faulting virtual address has no L1, L2, or L3 mapping
	 */
	mov r16=cr.ifa				// get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
	movl r18=PAGE_SHIFT
	mov r25=cr.itir
#endif
	;;
	rsm psr.dt				// use physical addressing for data
	mov r31=pr				// save the predicate registers
	mov r19=IA64_KR(PT_BASE)		// get page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	shr.u r17=r16,61			// get the region number into r17
	;;
	shr r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
	extr.u r26=r25,2,6			// get the page size (itir.ps) of the original miss
	;;
	cmp.eq p8,p0=HPAGE_SHIFT,r26
	;;
(p8)	dep r25=r18,r25,2,6			// huge page: walk with normal PAGE_SHIFT granularity
(p8)	shr r22=r22,HPAGE_SHIFT-PAGE_SHIFT
#endif
	;;
	cmp.eq p6,p7=5,r17			// is IFA pointing into to region 5?
	shr.u r18=r22,PGDIR_SHIFT		// get bits 33-63 of the faulting address
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
	srlz.d					// ensure "rsm psr.dt" has taken effect
(p6)	movl r19=__pa(swapper_pg_dir)		// region 5 is rooted at swapper_pg_dir
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
	shr.u r18=r22,PMD_SHIFT			// shift L2 index into position
	;;
	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
	;;
(p7)	ld8 r20=[r17]				// fetch the L2 entry (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift L3 index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was L2 entry NULL?
	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
	;;
(p7)	ld8 r18=[r21]				// read the L3 PTE
	mov r19=cr.isr				// cr.isr bit 0 tells us if this is an insn miss
	;;
(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
	;;					// avoid RAW on p7
(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
	;;
(p10)	itc.i r18				// insert the instruction TLB entry
(p11)	itc.d r18				// insert the data TLB entry
(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
	mov cr.ifa=r22

#ifdef CONFIG_HUGETLB_PAGE
(p8)	mov cr.itir=r25				// change to default page-size for VHPT
#endif

	/*
	 * Now compute and insert the TLB entry for the virtual page table.  We never
	 * execute in a page table page so there is no need to set the exception deferral
	 * bit.
	 */
	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
	;;
(p7)	itc.d r24
	;;
#ifdef CONFIG_SMP
	/*
	 * Re-check L2 and L3 pagetable.  If they changed, we may have received a ptc.g
	 * between reading the pagetable and the "itc".  If so, flush the entry we
	 * inserted and retry.
	 */
	ld8 r25=[r21]				// read L3 PTE again
	ld8 r26=[r17]				// read L2 entry again
	;;
	cmp.ne p6,p7=r26,r20			// did L2 entry change
	mov r27=PAGE_SHIFT<<2
	;;
(p6)	ptc.l r22,r27				// purge PTE page translation
(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did L3 PTE change
	;;
(p6)	ptc.l r16,r27				// purge translation
#endif

	mov pr=r31,-1				// restore predicate registers
	rfi
END(vhpt_miss)
216
217	.align 1024
218/////////////////////////////////////////////////////////////////////////////////////////
219// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(itlb_miss)
	DBG_FAULT(1)
	/*
	 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the L3 PTE read
	 * and go on normally after that.
	 */
	mov r16=cr.ifa				// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
itlb_fault:
	mov r17=cr.iha				// get virtual address of L3 PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read L3 PTE (re-entered here after a nested miss)
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	itc.i r18
	;;
#ifdef CONFIG_SMP
	// Guard against a ptc.g from another CPU racing with the itc above:
	// if the PTE changed under us, purge what we just inserted.
	ld8 r19=[r17]				// read L3 PTE again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	rfi
END(itlb_miss)
254
255	.align 1024
256/////////////////////////////////////////////////////////////////////////////////////////
257// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(dtlb_miss)
	DBG_FAULT(2)
	/*
	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the L3 PTE read
	 * and go on normally after that.
	 */
	mov r16=cr.ifa				// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
dtlb_fault:
	mov r17=cr.iha				// get virtual address of L3 PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read L3 PTE (re-entered here after a nested miss)
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	itc.d r18
	;;
#ifdef CONFIG_SMP
	// Guard against a ptc.g from another CPU racing with the itc above:
	// if the PTE changed under us, purge what we just inserted.
	ld8 r19=[r17]				// read L3 PTE again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	rfi
END(dtlb_miss)
292
293	.align 1024
294/////////////////////////////////////////////////////////////////////////////////////////
295// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
	DBG_FAULT(3)
	// Alternate ITLB miss: taken when the VHPT is disabled (or not usable
	// for this reference).  Kernel identity-mapped regions (6/7) are
	// handled inline; user-mode references go to page_fault.
	mov r16=cr.ifa		// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	mov r21=cr.ipsr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r31=pr
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// user mode
	;;
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk itlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
	shr.u r18=r16,57	// move address bit 61 to bit 4
	;;
	andcm r18=0x10,r18	// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23	// psr.cpl != 0?
	or r19=r17,r19		// insert PTE control bits into r19
	;;
	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
(p8)	br.cond.spnt page_fault
	;;
	itc.i r19		// insert the TLB entry
	mov pr=r31,-1
	rfi
END(alt_itlb_miss)
330
331	.align 1024
332/////////////////////////////////////////////////////////////////////////////////////////
333// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
	DBG_FAULT(4)
	// Alternate DTLB miss: like alt_itlb_miss but additionally treats a
	// faulting speculative load (lfetch or ld.s) by setting ipsr.ed so the
	// access is deferred instead of faulting.
	mov r16=cr.ifa		// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	mov r20=cr.isr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r21=cr.ipsr
	mov r31=pr
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// access to region 0-5
	;;
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk dtlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
	shr.u r18=r16,57			// move address bit 61 to bit 4
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
	;;
	andcm r18=0x10,r18	// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23
(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
(p8)	br.cond.spnt page_fault

	dep r21=-1,r21,IA64_PSR_ED_BIT,1	// prepare ipsr with psr.ed set
	or r19=r19,r17		// insert PTE control bits into r19
	;;
	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
(p6)	mov cr.ipsr=r21		// speculative access: defer it via psr.ed
	;;
(p7)	itc.d r19		// insert the TLB entry
	mov pr=r31,-1
	rfi
END(alt_dtlb_miss)
376
377	//-----------------------------------------------------------------------------------
378	// call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
ENTRY(page_fault)
	// Build a full pt_regs frame and call ia64_do_page_fault(ifa, isr, regs).
	// On entry: predicates saved in r31, psr.dt may be off, r16 = faulting address.
	ssm psr.dt
	;;
	srlz.i
	;;
	SAVE_MIN_WITH_COVER
	alloc r15=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=cr.isr
	adds r3=8,r2				// set up second base pointer
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	movl r14=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r14
	;;
	adds out2=16,r12			// out2 = pointer to pt_regs
	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
END(page_fault)
403
404	.align 1024
405/////////////////////////////////////////////////////////////////////////////////////////
406// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(nested_dtlb_miss)
	/*
	 * In the absence of kernel bugs, we get here when the virtually mapped linear
	 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
	 * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
	 * table is missing, a nested TLB miss fault is triggered and control is
	 * transferred to this point.  When this happens, we lookup the pte for the
	 * faulting address by walking the page table in physical mode and return to the
	 * continuation point passed in register r30 (or call page_fault if the address is
	 * not mapped).
	 *
	 * Input:	r16:	faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Output:	r17:	physical address of L3 PTE of faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Clobbered:	b0, r18, r19, r21, psr.dt (cleared)
	 */
	rsm psr.dt				// switch to using physical data addressing
	mov r19=IA64_KR(PT_BASE)		// get the page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	;;
	shr.u r17=r16,61			// get the region number into r17
	;;
	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
	shr.u r18=r16,PGDIR_SHIFT		// get bits 33-63 of faulting address
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
	srlz.d					// ensure "rsm psr.dt" has taken effect
(p6)	movl r19=__pa(swapper_pg_dir)		// region 5 is rooted at swapper_pg_dir
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
	shr.u r18=r16,PMD_SHIFT			// shift L2 index into position
	;;
	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
	;;
(p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
	shr.u r19=r16,PAGE_SHIFT		// shift L3 index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
(p6)	br.cond.spnt page_fault
	mov b0=r30
	br.sptk.many b0				// return to continuation point
END(nested_dtlb_miss)
464
465	.align 1024
466/////////////////////////////////////////////////////////////////////////////////////////
467// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
	// No inline handler: dispatch to the generic fault handler (vector 6).
	DBG_FAULT(6)
	FAULT(6)
END(ikey_miss)
472
473	.align 1024
474/////////////////////////////////////////////////////////////////////////////////////////
475// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
	// No inline handler: dispatch to the generic fault handler (vector 7).
	DBG_FAULT(7)
	FAULT(7)
END(dkey_miss)
480
481	.align 1024
482/////////////////////////////////////////////////////////////////////////////////////////
483// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
	DBG_FAULT(8)
	/*
	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
	 * update both the page-table and the TLB entry.  To efficiently access the PTE,
	 * we address it through the virtual page table.  Most likely, the TLB entry for
	 * the relevant virtual page table page is still present in the TLB so we can
	 * normally do this without additional TLB misses.  In case the necessary virtual
	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
	 * up the physical address of the L3 PTE and then continue at label 1 below.
	 */
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
	mov r31=pr				// save pr
#ifdef CONFIG_SMP
	// On SMP the PTE update must be atomic (cmpxchg) so we can't lose a
	// concurrent update, and the PTE is re-read after the itc to detect a
	// racing ptc.g from another CPU.
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.d r25				// install updated PTE
	;;
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov pr=r31,-1				// restore pr
	rfi
END(dirty_bit)
536
537	.align 1024
538/////////////////////////////////////////////////////////////////////////////////////////
539// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
	DBG_FAULT(9)
	// Like Entry 8, except for instruction access
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	mov r31=pr				// save predicates
#ifdef CONFIG_ITANIUM
	/*
	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
	 */
	mov r17=cr.ipsr
	;;
	mov r18=cr.iip
	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
	;;
(p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	// Atomic update + re-check, as in the dirty-bit handler.
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.i r25				// install updated PTE
	;;
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else /* !CONFIG_SMP */
	;;
1:	ld8 r18=[r17]
	;;
	or r18=_PAGE_A,r18			// set the accessed bit
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.i r18				// install updated PTE
#endif /* !CONFIG_SMP */
	mov pr=r31,-1
	rfi
END(iaccess_bit)
595
596	.align 1024
597/////////////////////////////////////////////////////////////////////////////////////////
598// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
	DBG_FAULT(10)
	// Like Entry 8, except for data access
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r31=pr
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	// Atomic update + re-check, as in the dirty-bit handler.
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.d r25				// install updated PTE
	;;
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_A,r18			// set the accessed bit
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov b0=r29				// restore b0
	mov pr=r31,-1
	rfi
END(daccess_bit)
642
643	.align 1024
644/////////////////////////////////////////////////////////////////////////////////////////
645// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)

/* System call entry/exit only saves/restores part of pt_regs, i.e. no scratch registers
 * are saved/restored except r15 which contains syscall number and needs to be saved in the
 * entry. This optimization is based on the assumption that applications only call glibc
 * system call interface which doesn't use scratch registers after break into kernel.
 * Registers saved/restored during system call entry/exit are listed as follows:
 *
 *   Registers to be saved & restored:
 *	CR registers: cr_ipsr, cr_iip, cr_ifs
 *	AR registers: ar_unat, ar_pfs, ar_rsc, ar_rnat, ar_bspstore, ar_fpsr
 * 	others: pr, b0, loadrs, r1, r12, r13, r15
 *   Registers to be restored only:
 * 	r8~r11: output value from the system call.
 *
 * During system call exit, scratch registers (including r15) are modified/cleared to
 * prevent leaking bits from kernel to user level.
 */
	DBG_FAULT(11)
	mov r16=cr.iim
	mov r17=__IA64_BREAK_SYSCALL
	mov r31=pr		// prepare to save predicates
	;;
	cmp.eq p0,p7=r16,r17	// is this a system call? (p7 <- false, if so)
(p7)	br.cond.spnt non_syscall

	mov r21=ar.fpsr;
	mov r29=cr.ipsr;
	mov r20=r1;
	mov r25=ar.unat;
	mov r27=ar.rsc;
	mov r26=ar.pfs;
	mov r28=cr.iip;
	mov r1=IA64_KR(CURRENT);		/* r1 = current (physical) */
	;;
	invala;

	/* adjust return address so we skip over the break instruction: */

	extr.u r8=r29,41,2			// extract ei field from cr.ipsr
	extr.u r16=r29,32,2;			/* extract psr.cpl */
	;;
	cmp.eq p6,p7=2,r8			// ipsr.ei==2?
	cmp.eq pKern,pUser=r0,r16;		/* are we in kernel mode already? (psr.cpl==0) */
	;;
(p6)	mov r8=0				// clear ei to 0
(p6)	adds r28=16,r28				// switch cr.iip to next bundle cr.ipsr.ei wrapped
(p7)	adds r8=1,r8				// increment ei to next slot
	;;
	dep r29=r8,r29,41,2			// insert new ei into cr.ipsr
	;;

	/* switch from user to kernel RBS: */
	mov r30=r0
	MINSTATE_START_SAVE_MIN_VIRT
	br.call.sptk.many b7=ia64_syscall_setup
	;;
	// p10==true means out registers are more than 8 or r15's Nat is true
(p10)	br.cond.spnt.many ia64_ret_from_syscall
	mov r3=255
	adds r15=-1024,r15			// r15 contains the syscall number---subtract 1024
	adds r2=IA64_TASK_PTRACE_OFFSET,r13	// r2 = &current->ptrace
	;;
	cmp.geu p6,p7=r3,r15		// (syscall > 0 && syscall <= 1024+255) ?
	movl r16=sys_call_table
	;;
(p6)	shladd r16=r15,3,r16
	movl r15=ia64_ret_from_syscall
(p7)	adds r16=(__NR_ni_syscall-1024)*8,r16	// force __NR_ni_syscall
	;;
	ld8 r16=[r16]				// load address of syscall entry point
	mov rp=r15				// set the real return addr
	;;
	ld8 r2=[r2]				// r2 = current->ptrace
	mov b6=r16

	;;
	tbit.z p8,p0=r2,PT_TRACESYS_BIT		// (current->ptrace & PF_TRACESYS) == 0?
	;;
(p8)	br.call.sptk.many b6=b6			// ignore this return addr

	br.cond.sptk ia64_trace_syscall
	// NOT REACHED
END(break_fault)
730
731
732	.align 1024
733/////////////////////////////////////////////////////////////////////////////////////////
734// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(interrupt)
	DBG_FAULT(12)
	// External interrupt: save full state, then call
	// ia64_handle_irq(cr.ivr, pt_regs*) and leave via ia64_leave_kernel.
	mov r31=pr		// prepare to save predicates
	;;
	SAVE_MIN_WITH_COVER	// uses r31; defines r2 and r3
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	adds r3=8,r2		// set up second base pointer for SAVE_REST
	srlz.i			// ensure everybody knows psr.ic is back on
	;;
	SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
	mov out0=cr.ivr		// pass cr.ivr as first arg
	add out1=16,sp		// pass pointer to pt_regs as second arg
	;;
	srlz.d			// make sure we see the effect of cr.ivr
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_handle_irq
END(interrupt)
757
	.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
	// Reserved vector: route to the generic fault dispatcher.
	DBG_FAULT(13)
	FAULT(13)

	.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	// Reserved vector: route to the generic fault dispatcher.
	DBG_FAULT(14)
	FAULT(14)
769
770	/*
771	 * There is no particular reason for this code to be here, other than that
772	 * there happens to be space here that would go unused otherwise.  If this
773	 * fault ever gets "unreserved", simply moved the following code to a more
774	 * suitable spot...
775	 *
776	 * ia64_syscall_setup() is a separate subroutine so that it can
777	 *	allocate stacked registers so it can safely demine any
778	 *	potential NaT values from the input registers.
779	 *
780	 * On entry:
781	 *	- executing on bank 0 or bank 1 register set (doesn't matter)
782	 *	-  r1: stack pointer
783	 *	-  r2: current task pointer
784	 *	-  r3: preserved
785	 *	- r12: original contents (sp to be saved)
786	 *	- r13: original contents (tp to be saved)
787	 *	- r15: original contents (syscall # to be saved)
788	 *	- r18: saved bsp (after switching to kernel stack)
789	 *	- r20: saved r1 (gp)
790	 *	- r21: saved ar.fpsr
791	 *	- r22: kernel's register backing store base (krbs_base)
792	 *	- r23: saved ar.bspstore
793	 *	- r24: saved ar.rnat
794	 *	- r25: saved ar.unat
795	 *	- r26: saved ar.pfs
796	 *	- r27: saved ar.rsc
797	 *	- r28: saved cr.iip
798	 *	- r29: saved cr.ipsr
799	 *	- r31: saved pr
800	 *	-  b0: original contents (to be saved)
801	 * On exit:
802	 *	- executing on bank 1 registers
803	 *	- psr.ic enabled, interrupts restored
804	 *	-  r1: kernel's gp
805	 *	-  r3: preserved (same as on entry)
806	 *	-  r8: -EINVAL if p10 is true
807	 *	- r12: points to kernel stack
808	 *	- r13: points to current task
809	 *	- p10: TRUE if syscall is invoked with more than 8 out
810	 *	       registers or r15's Nat is true
811	 *	- p15: TRUE if interrupts need to be re-enabled
812	 *	- ar.fpsr: set to kernel settings
813	 */
ENTRY(ia64_syscall_setup)
	// Called from break_fault via "br.call ... b7"; returns through b7.
	// See the block comment above for the full register contract.
	alloc r19=ar.pfs,8,0,0,0
	tnat.nz p8,p0=in0
	add r16=PT(CR_IPSR),r1  /* initialize first base pointer */
	;;
	st8 [r16]=r29,16;   	/* save cr.ipsr */
	adds r17=PT(CR_IIP),r1; /* initialize second base pointer */
	;;
(p8)	mov in0=-1		/* demine NaT from input register */
	tnat.nz p9,p0=in1
	st8 [r17]=r28,16;	/* save cr.iip */
	mov r28=b0;
(pKern) mov r18=r0;             /* make sure r18 isn't NaT */
	extr.u r11=r19,7,7	/* get sol of ar.pfs */
	and r8=0x7f,r19		/* get sof of ar.pfs */
	;;
(p9)	mov in1=-1
	tnat.nz p10,p0=in2
	st8 [r16]=r30,16;	/* save cr.ifs */
	st8 [r17]=r25,16;	/* save ar.unat */
(pUser) sub r18=r18,r22;	/* r18=RSE.ndirty*8 */
	add r11=8,r11
	;;
	st8 [r16]=r26,16;	/* save ar.pfs */
	st8 [r17]=r27,16;	/* save ar.rsc */
	tbit.nz p15,p0=r29,IA64_PSR_I_BIT
	;;                      /* avoid RAW on r16 & r17 */
(p10)	mov in2=-1
	nop.f 0
	tnat.nz p11,p0=in3
(pKern) adds r16=16,r16;        /* skip over ar_rnat field */
(pKern) adds r17=16,r17;        /* skip over ar_bspstore field */
	shl r18=r18,16;         /* compute ar.rsc to be used for "loadrs" */
	;;
(p11)	mov in3=-1
	tnat.nz p12,p0=in4
(pUser) st8 [r16]=r24,16;	/* save ar.rnat */
(pUser) st8 [r17]=r23,16;	/* save ar.bspstore */
	;;
(p12)	mov in4=-1
	tnat.nz p13,p0=in5
	st8 [r16]=r31,16;	/* save predicates */
	st8 [r17]=r28,16;	/* save b0 */
	dep r14=-1,r0,61,3;
	;;
	st8 [r16]=r18,16;       /* save ar.rsc value for "loadrs" */
	st8.spill [r17]=r20,16;	/* save original r1 */
	adds r2=IA64_PT_REGS_R16_OFFSET,r1;
	;;
(p13)	mov in5=-1
	tnat.nz p14,p0=in6
.mem.offset 0,0;                st8.spill [r16]=r12,16;
.mem.offset 8,0;                st8.spill [r17]=r13,16;
	cmp.eq pNonSys,pSys=r0,r0       /* initialize pSys=0, pNonSys=1 */
	;;
(p14)	mov in6=-1
	tnat.nz p8,p0=in7
.mem.offset 0,0;                st8 [r16]=r21,16;    /* ar.fpsr */
.mem.offset 8,0;                st8.spill [r17]=r15,16;
	adds r12=-16,r1;        /* switch to kernel memory stack (with 16 bytes of scratch) */
	;;
	cmp.lt p10,p9=r11,r8	/* frame size can't be more than local+8 */
	mov r13=IA64_KR(CURRENT);       /* establish `current' */
	movl r1=__gp;           /* establish kernel global pointer */
	;;
	MINSTATE_END_SAVE_MIN_VIRT

(p9)	tnat.nz p10,p0=r15
(p8)	mov in7=-1
	ssm psr.ic | PSR_DEFAULT_BITS
	movl r17=FPSR_DEFAULT
	adds r8=(IA64_PT_REGS_R8_OFFSET-IA64_PT_REGS_R16_OFFSET),r2
	;;
	srlz.i					// guarantee that interruption collection is on
	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
(p15)	ssm psr.i		// restore psr.i
	mov.m ar.fpsr=r17
	stf8 [r8]=f1            // ensure pt_regs.r8 != 0 (see handle_syscall_error)
(p10)	mov r8=-EINVAL
	br.ret.sptk.many b7
END(ia64_syscall_setup)
895
	.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	// Reserved vector: route to the generic fault dispatcher.
	DBG_FAULT(15)
	FAULT(15)
901
902	/*
903	 * Squatting in this space ...
904	 *
905	 * This special case dispatcher for illegal operation faults allows preserved
906	 * registers to be modified through a callback function (asm only) that is handed
907	 * back from the fault handler in r8. Up to three arguments can be passed to the
908	 * callback function by returning an aggregate with the callback as its first
909	 * element, followed by the arguments.
910	 */
ENTRY(dispatch_illegal_op_fault)
	// Save state and call ia64_illegal_op_fault(ar.ec, ...).  The handler
	// may return a callback address in r8 (with args in r9-r11) which is
	// invoked before leaving the kernel; see the block comment above.
	SAVE_MIN_WITH_COVER
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i		// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i	// restore psr.i
	adds r3=8,r2	// set up second base pointer for SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
	mov out0=ar.ec
	;;
	SAVE_REST
	;;
	br.call.sptk.many rp=ia64_illegal_op_fault
.ret0:	;;
	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
	mov out0=r9
	mov out1=r10
	mov out2=r11
	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	mov b6=r8
	;;
	cmp.ne p6,p0=0,r8
(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
	br.sptk.many ia64_leave_kernel
END(dispatch_illegal_op_fault)
940
	.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	DBG_FAULT(16)
	FAULT(16)

	.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	DBG_FAULT(17)
	FAULT(17)
952
ENTRY(non_syscall)
	/*
	 * Handles break instructions whose immediate is not a syscall break.
	 * Saves full state and hands off to ia64_bad_break(cr.iim, &pt_regs);
	 * returns to user/kernel via ia64_leave_kernel.
	 */
	SAVE_MIN_WITH_COVER

	// There is no particular reason for this code to be here, other than that
	// there happens to be space here that would go unused otherwise.  If this
	// fault ever gets "unreserved", simply move the following code to a more
	// suitable spot...

	alloc r14=ar.pfs,0,0,2,0
	mov out0=cr.iim			// break immediate value
	add out1=16,sp			// pointer to pt_regs (past the 16-byte scratch area)
	adds r3=8,r2			// set up second base pointer for SAVE_REST

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	movl r15=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r15
	;;
	br.call.sptk.many b6=ia64_bad_break	// avoid WAW on CFM and ignore return addr
END(non_syscall)
978
	.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
	DBG_FAULT(18)
	FAULT(18)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */
991
ENTRY(dispatch_unaligned_handler)
	/*
	 * Reached from unaligned_access (entry 30).  Saves full state and tail-calls
	 * ia64_prepare_handle_unaligned with out0 = faulting address (cr.ifa) and
	 * out1 = &pt_regs; returns via ia64_leave_kernel.
	 */
	SAVE_MIN_WITH_COVER
	;;
	alloc r14=ar.pfs,0,0,2,0		// now it's safe (must be first in insn group!)
	mov out0=cr.ifa				// faulting data address
	adds out1=16,sp				// pointer to pt_regs

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.sptk.many ia64_prepare_handle_unaligned
END(dispatch_unaligned_handler)
1012
	.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
	DBG_FAULT(19)
	FAULT(19)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */
1025
ENTRY(dispatch_to_fault_handler)
	/*
	 * Generic dispatcher: saves full state and calls
	 * ia64_fault(vector, cr.isr, cr.ifa, cr.iim, cr.itir), returning via
	 * ia64_leave_kernel.
	 *
	 * Input:
	 *	psr.ic:	off
	 *	r19:	fault vector number (e.g., 24 for General Exception)
	 *	r31:	contains saved predicates (pr)
	 */
	SAVE_MIN_WITH_COVER_R19
	alloc r14=ar.pfs,0,0,5,0
	mov out0=r15				// fault vector number (copied from r19 by SAVE_MIN)
	mov out1=cr.isr
	mov out2=cr.ifa
	mov out3=cr.iim
	mov out4=cr.itir
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	adds r3=8,r2				// set up second base pointer for SAVE_REST
	;;
	SAVE_REST
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_fault
END(dispatch_to_fault_handler)
1054
1055//
1056// --- End of long entries, Beginning of short entries
1057//
1058
	.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
ENTRY(page_not_present)
	DBG_FAULT(20)
	mov r16=cr.ifa			// r16 = faulting address (needed by page_fault)
	rsm psr.dt			// switch to physical addressing for data
	/*
	 * The Linux page fault handler doesn't expect non-present pages to be in
	 * the TLB.  Flush the existing entry now, so we meet that expectation.
	 */
	mov r17=PAGE_SHIFT<<2
	;;
	ptc.l r16,r17			// purge the local TLB entry for the faulting page
	;;
	mov r31=pr			// save predicates for page_fault
	srlz.d
	br.sptk.many page_fault
END(page_not_present)
1078
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
ENTRY(key_permission)
	DBG_FAULT(21)
	mov r16=cr.ifa			// r16 = faulting address (needed by page_fault)
	rsm psr.dt			// switch to physical addressing for data
	mov r31=pr			// save predicates for page_fault
	;;
	srlz.d
	br.sptk.many page_fault
END(key_permission)
1091
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(iaccess_rights)
	DBG_FAULT(22)
	mov r16=cr.ifa			// r16 = faulting address (needed by page_fault)
	rsm psr.dt			// switch to physical addressing for data
	mov r31=pr			// save predicates for page_fault
	;;
	srlz.d
	br.sptk.many page_fault
END(iaccess_rights)
1104
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(daccess_rights)
	DBG_FAULT(23)
	mov r16=cr.ifa			// r16 = faulting address (needed by page_fault)
	rsm psr.dt			// switch to physical addressing for data
	mov r31=pr			// save predicates for page_fault
	;;
	srlz.d
	br.sptk.many page_fault
END(daccess_rights)
1117
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(general_exception)
	DBG_FAULT(24)
	mov r16=cr.isr
	mov r31=pr			// save predicates for the dispatcher
	;;
	cmp4.eq p6,p0=0,r16		// ISR.code == 0 => illegal operation fault
(p6)	br.sptk.many dispatch_illegal_op_fault
	;;
	mov r19=24		// fault number
	br.sptk.many dispatch_to_fault_handler
END(general_exception)
1132
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(disabled_fp_reg)
	DBG_FAULT(25)
	rsm psr.dfh		// ensure we can access fph
	;;
	srlz.d
	mov r31=pr		// save predicates for the dispatcher
	mov r19=25		// fault number
	br.sptk.many dispatch_to_fault_handler
END(disabled_fp_reg)
1145
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(nat_consumption)
	DBG_FAULT(26)
	FAULT(26)		// no special handling: report the fault
END(nat_consumption)
1153
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(speculation_vector)
	DBG_FAULT(27)
	/*
	 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
	 * this part of the architecture is not implemented in hardware on some CPUs, such
	 * as Itanium.  Thus, in general we need to emulate the behavior.  IIM contains
	 * the relative target (not yet sign extended).  So after sign extending it we
	 * simply add it to IIP.  We also need to reset the EI field of the IPSR to zero,
	 * i.e., the slot to restart into.
	 *
	 * cr.iim contains zero_ext(imm21)
	 */
	mov r18=cr.iim
	;;
	mov r17=cr.iip
	shl r18=r18,43			// put sign bit in position (43=64-21)
	;;

	mov r16=cr.ipsr
	shr r18=r18,39			// sign extend (39=43-4); result is imm21 << 4
	;;

	add r17=r17,r18			// now add the offset
	;;
	mov cr.iip=r17			// resume at the recovery-code target
	dep r16=0,r16,41,2		// clear EI
	;;

	mov cr.ipsr=r16
	;;

	rfi				// and go back
END(speculation_vector)
1190
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
	DBG_FAULT(28)
	FAULT(28)
1196
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(debug_vector)
	DBG_FAULT(29)
	FAULT(29)		// no special handling: report the fault
END(debug_vector)
1204
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(unaligned_access)
	DBG_FAULT(30)
	mov r16=cr.ipsr
	mov r31=pr		// prepare to save predicates
	;;
	br.sptk.many dispatch_unaligned_handler
END(unaligned_access)
1215
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
	DBG_FAULT(31)
	FAULT(31)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
	DBG_FAULT(32)
	FAULT(32)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
	DBG_FAULT(33)
	FAULT(33)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
	DBG_FAULT(34)
	FAULT(34)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
	DBG_FAULT(35)
	FAULT(35)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
	DBG_FAULT(36)
	FAULT(36)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Reserved
	DBG_FAULT(37)
	FAULT(37)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
	DBG_FAULT(38)
	FAULT(38)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
	DBG_FAULT(39)
	FAULT(39)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
	DBG_FAULT(40)
	FAULT(40)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
	DBG_FAULT(41)
	FAULT(41)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
	DBG_FAULT(42)
	FAULT(42)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
	DBG_FAULT(43)
	FAULT(43)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
	DBG_FAULT(44)
	FAULT(44)
1299
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(ia32_exception)
	DBG_FAULT(45)
	FAULT(45)		// no special handling: report the fault
END(ia32_exception)
1307
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
ENTRY(ia32_intercept)
	DBG_FAULT(46)
#ifdef	CONFIG_IA32_SUPPORT
	/*
	 * Fast path: if this is a system-flag intercept (ISR.code == 2) where the
	 * only changed EFLAGS bit is AC, the change is harmless -- just resume.
	 * Everything else falls through to the generic fault path.
	 */
	mov r31=pr
	mov r16=cr.isr
	;;
	extr.u r17=r16,16,8	// get ISR.code
	mov r18=ar.eflag
	mov r19=cr.iim		// old eflag value
	;;
	cmp.ne p6,p0=2,r17
(p6)	br.cond.spnt 1f		// not a system flag fault
	xor r16=r18,r19		// r16 = bits that changed
	;;
	extr.u r17=r16,18,1	// get the eflags.ac bit
	;;
	cmp.eq p6,p0=0,r17
(p6)	br.cond.spnt 1f		// eflags.ac bit didn't change
	;;
	mov pr=r31,-1		// restore predicate registers
	rfi

1:
#endif	// CONFIG_IA32_SUPPORT
	FAULT(46)
END(ia32_intercept)
1337
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt  (74)
ENTRY(ia32_interrupt)
	DBG_FAULT(47)
#ifdef CONFIG_IA32_SUPPORT
	mov r31=pr		// save predicates for the dispatcher
	br.sptk.many dispatch_to_ia32_handler
#else
	FAULT(47)
#endif
END(ia32_interrupt)
1350
	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
	DBG_FAULT(48)
	FAULT(48)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
	DBG_FAULT(49)
	FAULT(49)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
	DBG_FAULT(50)
	FAULT(50)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
	DBG_FAULT(51)
	FAULT(51)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
	DBG_FAULT(52)
	FAULT(52)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
	DBG_FAULT(53)
	FAULT(53)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
	DBG_FAULT(54)
	FAULT(54)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
	DBG_FAULT(55)
	FAULT(55)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
	DBG_FAULT(56)
	FAULT(56)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
	DBG_FAULT(57)
	FAULT(57)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
	DBG_FAULT(58)
	FAULT(58)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
	DBG_FAULT(59)
	FAULT(59)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
	DBG_FAULT(60)
	FAULT(60)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
	DBG_FAULT(61)
	FAULT(61)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
	DBG_FAULT(62)
	FAULT(62)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
	DBG_FAULT(63)
	FAULT(63)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
	DBG_FAULT(64)
	FAULT(64)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
	DBG_FAULT(65)
	FAULT(65)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
	DBG_FAULT(66)
	FAULT(66)

	.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
	DBG_FAULT(67)
	FAULT(67)
1470
#ifdef CONFIG_IA32_SUPPORT

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 */

	// IA32 interrupt entry point

ENTRY(dispatch_to_ia32_handler)
	/*
	 * Reached from ia32_interrupt (entry 47).  If the interrupt vector is 0x80
	 * (the IA-32 Linux syscall gate), decode the EAX syscall number and the
	 * ebx/ecx/edx/esi/edi/ebp arguments from pt_regs and dispatch through
	 * ia32_syscall_table; otherwise hand off to ia32_bad_interrupt().
	 */
	SAVE_MIN
	;;
	mov r14=cr.isr
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i
	adds r3=8,r2            // Base pointer for SAVE_REST
	;;
	SAVE_REST
	;;
	mov r15=0x80
	shr r14=r14,16          // Get interrupt number
	;;
	cmp.ne p6,p0=r14,r15
(p6)	br.call.dpnt.many b6=non_ia32_syscall

	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW conventions
	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
	;;
	cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
	ld8 r8=[r14]		// get r8
	;;
	st8 [r15]=r8		// save original EAX in r1 (IA32 procs don't use the GP)
	;;
	alloc r15=ar.pfs,0,0,6,0	// must be first in an insn group
	;;
	ld4 r8=[r14],8		// r8 == eax (syscall number)
	mov r15=230		// number of entries in ia32 system call table
	;;
	cmp.ltu.unc p6,p7=r8,r15	// p6 <- syscall number in range
	ld4 out1=[r14],8	// r9 == ecx
	;;
	ld4 out2=[r14],8	// r10 == edx
	;;
	ld4 out0=[r14]		// r11 == ebx
	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
	;;
	ld4 out5=[r14],PT(R14)-PT(R13)	// r13 == ebp
	;;
	ld4 out3=[r14],PT(R15)-PT(R14)	// r14 == esi
	adds r2=IA64_TASK_PTRACE_OFFSET,r13	// r2 = &current->ptrace
	;;
	ld4 out4=[r14]		// r15 == edi
	movl r16=ia32_syscall_table
	;;
(p6)	shladd r16=r8,3,r16	// force ni_syscall if not valid syscall number
	ld8 r2=[r2]		// r2 = current->ptrace
	;;
	ld8 r16=[r16]		// r16 = syscall-table entry (or ni_syscall)
	tbit.z p8,p0=r2,PT_TRACESYS_BIT	// (current->ptrace & PT_TRACESYS) == 0?
	;;
	mov b6=r16
	movl r15=ia32_ret_from_syscall
	;;
	mov rp=r15
(p8)	br.call.sptk.many b6=b6		// not traced: call the handler directly
	br.cond.sptk ia32_trace_syscall	// traced: go through the syscall tracer

non_ia32_syscall:
	alloc r15=ar.pfs,0,0,2,0
	mov out0=r14				// interrupt #
	add out1=16,sp				// pointer to pt_regs
	;;			// avoid WAW on CFM
	br.call.sptk.many rp=ia32_bad_interrupt
.ret1:	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	br.ret.sptk.many rp
END(dispatch_to_ia32_handler)

#endif /* CONFIG_IA32_SUPPORT */
1556