/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Silicon Graphics, Inc.
 * Written by Ulf Carlsson (ulfc@engr.sgi.com)
 * Copyright (C) 2002  Maciej W. Rozycki
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/hazards.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/pgtable.h>
#include <asm/stackframe.h>
#include <asm/war.h>

#define PGD_INDX_MASK	((_PTRS_PER_PGD - 1) << _PGD_T_LOG2)
#define PMD_INDX_MASK	((_PTRS_PER_PMD - 1) << _PMD_T_LOG2)
#define PTE_INDX_MASK	((_PTRS_PER_PTE - 1) << _PTE_T_LOG2)
#define PTEP_INDX_MASK	(((_PTRS_PER_PTE >> 1) - 1) << (_PTE_T_LOG2 + 1))
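
/*
 * Byte-offset masks for each level of the page table walk.  Each mask
 * selects the index bits of the faulting address, pre-scaled by the
 * size of one table entry.  PTEP_INDX_MASK indexes even/odd PTE pairs
 * (16 bytes per pair with 8-byte PTEs), since the refill handlers
 * below always fetch two adjacent PTEs for EntryLo0/EntryLo1.
 */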

	.data
	.comm	pgd_current, NR_CPUS * 8, 8
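	/*
	 * One 8-byte slot per CPU holding that CPU's current PGD
	 * pointer.  On SMP a pointer to the running CPU's slot is
	 * recovered from CP0_CONTEXT (the dsra by 23 below); on UP
	 * the first slot is used directly.
	 */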

	/*
	 * After this macro runs, PTR points to the even PTE of the
	 * pair that maps the address which caused the fault.
	 */
	.macro	LOAD_PTE2, ptr, tmp, kaddr
#ifdef CONFIG_SMP
	dmfc0	\ptr, CP0_CONTEXT
	dmfc0	\tmp, CP0_BADVADDR
	dsra	\ptr, 23			# get pgd_current[cpu]
#else
	dmfc0	\tmp, CP0_BADVADDR
	dla	\ptr, pgd_current
#endif
	bltz	\tmp, \kaddr
	 ld	\ptr, (\ptr)
	dsrl	\tmp, PGDIR_SHIFT - 3		# get pgd offset in bytes
	andi	\tmp, PGD_INDX_MASK
	daddu	\ptr, \tmp			# add in pgd offset
	dmfc0	\tmp, CP0_BADVADDR
	ld	\ptr, (\ptr)			# get pmd pointer
	dsrl	\tmp, PMD_SHIFT - 3		# get pmd offset in bytes
	andi	\tmp, PMD_INDX_MASK
	daddu	\ptr, \tmp			# add in pmd offset
	dmfc0	\tmp, CP0_XCONTEXT
	ld	\ptr, (\ptr)			# get pte pointer
	andi	\tmp, PTEP_INDX_MASK		# get pte offset
	daddu	\ptr, \tmp
	.endm
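
	/*
	 * In rough C terms the walk above is (a sketch only):
	 *
	 *	ptr = pgd_base + pgd_offset;		(pgd entry)
	 *	ptr = *ptr     + pmd_offset;		(pmd entry)
	 *	ptr = *ptr     + pte_pair_offset;	(even pte of the pair)
	 *
	 * The pte-pair offset comes pre-scaled from the BadVPN2 field
	 * of CP0_XCONTEXT.  Kernel addresses (bit 63 set) take the
	 * \kaddr exit through the bltz above.
	 */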


	/*
	 * Ditto for the kernel (vmalloc) page table, kptbl.
	 */
	.macro	LOAD_KPTE2, ptr, tmp, not_vmalloc
	/*
	 * First, fetch the fault address and the base of the vmalloc
	 * range.
	 */
	dmfc0	\tmp, CP0_BADVADDR
	dli	\ptr, VMALLOC_START

	/*
	 * Now find the offset into kptbl.
	 */
	dsubu	\tmp, \tmp, \ptr
	dla	\ptr, kptbl
	dsrl	\tmp, _PAGE_SHIFT + 1		# get vpn2
	dsll	\tmp, 4				# byte offset of pte pair
	daddu	\ptr, \ptr, \tmp

	/*
	 * Make sure the fault address really lies within the range
	 * covered by kptbl; bail out to \not_vmalloc otherwise.
	 */
	dla	\tmp, ekptbl
	slt	\tmp, \ptr, \tmp
	beqz	\tmp, \not_vmalloc		# not vmalloc
	 nop
	.endm
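
	/*
	 * kptbl is a single flat array of PTEs covering the vmalloc
	 * area, with ekptbl marking its end; both are set up elsewhere
	 * in the mm initialisation code.
	 */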


	/*
	 * Convert the even/odd PTE pair held in PTE0/PTE1 to EntryLo
	 * format and load them into ENTRYLO0 and ENTRYLO1.
	 */
	.macro	PTE_RELOAD, pte0, pte1
	dsrl	\pte0, 6			# convert to entrylo0
	dmtc0	\pte0, CP0_ENTRYLO0		# load it
	dsrl	\pte1, 6			# convert to entrylo1
	dmtc0	\pte1, CP0_ENTRYLO1		# load it
	.endm
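
	/*
	 * The shift right by 6 assumes the 64-bit PTE layout keeps its
	 * software-only bits in the low 6 bits, so that after the shift
	 * the PFN and hardware attribute bits line up with the EntryLo
	 * register format.
	 */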


	.text
	.set	noreorder
	.set	mips3

	__INIT

	.align	5
LEAF(except_vec0_generic)
	.set	noat
	PANIC("Unused vector called")
1:	b	1b
	 nop
END(except_vec0_generic)


	/*
	 * TLB refill handlers for the R4000 and SB1.
	 * Attention:  We may only use 32 instructions / 128 bytes.
	 */
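	/*
	 * The refill vectors themselves therefore hold only a short
	 * trampoline (dla/jr) to the full handlers, which live out of
	 * line and are not bound by the 128-byte limit.
	 */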
	.align  5
LEAF(except_vec1_r4k)
	.set    noat
	dla     k0, handle_vec1_r4k
	jr      k0
	 nop
END(except_vec1_r4k)

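	/*
	 * The BCM1250 M3 workaround below appears to catch spurious
	 * refill exceptions raised by that erratum: if the VPN2 of
	 * CP0_BADVADDR does not match the one in CP0_ENTRYHI, the
	 * exception is bogus and is simply retired with an eret.
	 */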
LEAF(except_vec1_sb1)
#if BCM1250_M3_WAR
	dmfc0	k0, CP0_BADVADDR
	dmfc0	k1, CP0_ENTRYHI
	xor	k0, k1
	dsrl	k0, k0, _PAGE_SHIFT + 1
	bnez	k0, 1f
#endif
	.set    noat
	dla     k0, handle_vec1_r4k
	jr      k0
	 nop

1:	eret
	nop
END(except_vec1_sb1)

	__FINIT

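	/*
	 * Out-of-line refill handler proper: fetch the even/odd PTE
	 * pair for the faulting address, drop it into EntryLo0/1 and
	 * write a random TLB entry.  The "b 1f / tlbwr" sequence issues
	 * the tlbwr from a branch delay slot, which appears to be a
	 * hazard workaround needed on some R4000-class cores; compare
	 * with the plain nop/tlbwr in the R10000 variant below.
	 */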
	.align  5
LEAF(handle_vec1_r4k)
	.set    noat
	LOAD_PTE2 k1 k0 9f
	ld	k0, 0(k1)			# get even pte
	ld	k1, 8(k1)			# get odd pte
	PTE_RELOAD k0 k1
	rm9000_tlb_hazard
	b	1f
	 tlbwr
1:	nop
	rm9000_tlb_hazard
	eret

9:						# handle the vmalloc range
	LOAD_KPTE2 k1 k0 invalid_vmalloc_address
	ld	k0, 0(k1)			# get even pte
	ld	k1, 8(k1)			# get odd pte
	PTE_RELOAD k0 k1
	rm9000_tlb_hazard
	b	1f
	 tlbwr
1:	nop
	rm9000_tlb_hazard
	eret
END(handle_vec1_r4k)


	__INIT

	/*
	 * TLB refill handler for the R10000.
	 * Attention:  We may only use 32 instructions / 128 bytes.
	 */
	.align	5
LEAF(except_vec1_r10k)
	.set    noat
	dla     k0, handle_vec1_r10k
	jr      k0
	 nop
END(except_vec1_r10k)

	__FINIT

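	/*
	 * Same refill logic as handle_vec1_r4k above, but the tlbwr is
	 * issued directly (nop/tlbwr) rather than from a branch delay
	 * slot, as R10000-class CPUs do not seem to need that
	 * workaround.
	 */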
	.align	5
LEAF(handle_vec1_r10k)
	.set	noat
	LOAD_PTE2 k1 k0 9f
	ld	k0, 0(k1)			# get even pte
	ld	k1, 8(k1)			# get odd pte
	PTE_RELOAD k0 k1
	rm9000_tlb_hazard
	nop
	tlbwr
	rm9000_tlb_hazard
	eret

9:						# handle the vmalloc range
	LOAD_KPTE2 k1 k0 invalid_vmalloc_address
	ld	k0, 0(k1)			# get even pte
	ld	k1, 8(k1)			# get odd pte
	PTE_RELOAD k0 k1
	rm9000_tlb_hazard
	nop
	tlbwr
	rm9000_tlb_hazard
	eret
END(handle_vec1_r10k)


	.align	5
LEAF(invalid_vmalloc_address)
	.set	noat
	PANIC("Invalid kernel address")
1:	b	1b
	 nop
END(invalid_vmalloc_address)