/* -*- mode: fundamental -*- */

/*
 * SN2 specific ivt(s)
 *
 * arch/ia64/sn/kernel/setup.c will dynamically replace code in the
 * ivt with code from here.
 *
 * Please note: We need to be sure that any changes in ivt.S are also
 * reflected here (for example, if the ia64_ivt_page_fault register
 * usage changes) and vice-versa.
 *
 * Copyright (c) 1998-2003 Hewlett-Packard Company
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 *
 * Copyright (c) 2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 */

#include <linux/config.h>

#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/unistd.h>


/* If someone has a *really* good reason to disable the VHPT for SN2 I'll fix this. --cw */
#ifdef CONFIG_DISABLE_VHPT
#error SN2 requires VHPT be enabled
#endif

	.section ".text.init", "ax"

/*
 * SN2 specific region 6/7 dtlb miss handler.
 *
 * On SN2 some granule-0 addresses (and therefore any aliases) are
 * actually used uncached.  We load small TC entries to ensure there
 * is no overlap between such regions (which could cause a Cache-Check
 * MCA).
 */
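
/*
 * For orientation, a rough C-style sketch of the decision flow the
 * handler below implements.  This is illustrative pseudocode only:
 * region(), phys_mask and the control-register field accesses are
 * shorthand, not real kernel symbols.
 *
 *	if (psr.cpl != 0)
 *		goto ia64_ivt_page_fault;	// branch target patched by setup.c
 *	pte = (cr.ifa & phys_mask) | PAGE_KERNEL;
 *	if (region(cr.ifa) == 6)		// uncached identity region
 *		pte |= 1 << 4;			// memory attribute = UC
 *	else if ((cr.ifa & SN2_GMASK) == 0)	// region 7, granule-0 address
 *		cr.itir.ps = _PAGE_SIZE_4K;	// use a small TC entry
 *	if (isr.sp || (isr.na && isr.code == IA64_ISR_CODE_LFETCH))
 *		psr.ed = 1;			// defer the speculative access
 *	else
 *		itc.d(pte);			// insert the data TC entry
 *	rfi;
 */
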
#define SN2_GMASK	(((1 << (36-IA64_GRANULE_SHIFT)) - 1) << IA64_GRANULE_SHIFT)
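/*
 * SN2_GMASK covers the granule-number bits of the faulting address,
 * i.e. bits IA64_GRANULE_SHIFT through 35.  As an example (assuming a
 * 64MB granule, IA64_GRANULE_SHIFT == 26), SN2_GMASK is 0xffc000000,
 * so an address lies in granule 0 exactly when (addr & SN2_GMASK) == 0.
 */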
	.global sn2_alt_dtlb_miss
ENTRY(sn2_alt_dtlb_miss)
	mov r16=cr.ifa					// get address that caused the TLB miss
	movl r17=PAGE_KERNEL				// kernel protection bits (RWX)
	mov r20=cr.isr					// need to check for SP and NA status
	movl r19=(((1<<IA64_MAX_PHYS_BITS)-1) & ~0xfff)	// mask to keep the pfn bits (clears region and PTE control bits)
	mov r21=cr.ipsr					// get ipsr in case we need to poke the ED bit
	mov r31=pr					// save pr's
	mov r25=_PAGE_SIZE_4K<<2			// granule-0 requires we use smaller pages
	;;
	movl r27=SN2_GMASK				// mask used to find granule-0 (aliased) addresses
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT		// is non-access bit on?
	and r22=IA64_ISR_CODE_MASK,r20			// get the isr.code field
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2		// extract psr.cpl
	shr.u r26=r16,61				// region number
	;;
	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT		// is speculation bit on?
	and r19=r19,r16					// clear ED, reserved bits, and PTE control bits
	cmp.eq p10,p11=7,r26				// p10 <- region 7, p11 <- region 6
	and r24=r27,r16					// keep only the granule-number bits (zero for granule 0)
	;;
	cmp.ne.andcm p10,p0=r0,r24			// p10 <- region-7 AND granule-0

/* arch/ia64/sn/kernel/setup.c patches this code; check there before changing anything here */
	.global sn2_alt_dtlb_miss_patch1
sn2_alt_dtlb_miss_patch1:
	{ .mib
(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field (non-access lfetch?)
	cmp.ne p8,p0=r0,r23				// p8 <- psr.cpl != 0 (user-level fault)
(p8)	br.cond.spnt.many ia64_ivt_page_fault		// NB: gets patched
	}

	;;
(p10)	mov cr.itir=r25					// use the smaller page size for the tc entry
	dep r21=-1,r21,IA64_PSR_ED_BIT,1		// r21 is psr with the ED bit set
	or r19=r19,r17					// insert PTE control bits into r19
	;;
(p6)	mov cr.ipsr=r21					// p6 (speculation): set ed (else we can get stuck)
(p11)	dep r19=-1,r19,4,1				// set bit 4 (memory attribute UC) for region-6 accesses
	;;
(p7)	itc.d r19					// insert the TLB entry
	mov pr=r31,-1					// restore pr's
	rfi
END(sn2_alt_dtlb_miss)
	.align 8
	.global sn2_alt_dtlb_miss_end
sn2_alt_dtlb_miss_end: