1/*
2 * TLB exception handling code for r4k.
3 *
4 * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse
5 *
6 * Multi-cpu abstraction and reworking:
7 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
8 *
9 * Carsten Langgaard, carstenl@mips.com
10 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
11 */
12#include <linux/config.h>
13#include <linux/init.h>
14
15#include <asm/asm.h>
16#include <asm/current.h>
17#include <asm/offset.h>
18#include <asm/cachectl.h>
19#include <asm/fpregdef.h>
20#include <asm/mipsregs.h>
21#include <asm/page.h>
22#include <asm/pgtable-bits.h>
23#include <asm/processor.h>
24#include <asm/regdef.h>
25#include <asm/stackframe.h>
26#include <asm/war.h>
27
28#define TLB_OPTIMIZE /* If you are paranoid, disable this. */
29
/*
 * Pte access primitives.  With 64-bit physical addresses each software
 * pte is a 64-bit quantity, so the load/store and the shift down to
 * EntryLo format must use the doubleword instruction forms; otherwise
 * plain word accesses suffice.
 */
#ifdef CONFIG_64BIT_PHYS_ADDR
#define PTE_L		ld
#define PTE_S		sd
#define PTE_SRL		dsrl
#define P_MTC0		dmtc0
#else
#define PTE_L		lw
#define PTE_S		sw
#define PTE_SRL		srl
#define P_MTC0		mtc0
#endif
41
/*
 * Geometry of one page of software ptes.  PTEP_INDX_MSK extracts the
 * byte offset of an even/odd pte *pair* (hence the doubled pte size);
 * PTE_INDX_MSK extracts the offset of a single pte.
 */
#define PTE_PAGE_SIZE	(_PAGE_SIZE << _PTE_ORDER)
#define PTE_PAGE_SHIFT	(_PAGE_SHIFT + _PTE_ORDER)
/* The inner parentheses are required: "_PTE_T_SIZE << 1 - 1" would
 * parse as "_PTE_T_SIZE << (1 - 1)" because `-' binds tighter than
 * `<<' in C/cpp expressions. */
#define PTEP_INDX_MSK	((PTE_PAGE_SIZE - 1) & ~((_PTE_T_SIZE << 1) - 1))
#define PTE_INDX_MSK	((PTE_PAGE_SIZE - 1) & ~(_PTE_T_SIZE - 1))
#define PTE_INDX_SHIFT	(PTE_PAGE_SHIFT - _PTE_T_LOG2)
47
48/*
49 * ABUSE of CPP macros 101.
50 *
51 * After this macro runs, the pte faulted on is
52 * in register PTE, a ptr into the table in which
53 * the pte belongs is in PTR.
54 */
55
/*
 * GET_PGD(scratch, ptr): leave the active pgd base pointer in `ptr'.
 * SMP keeps one entry per cpu in the pgd_current array, indexed here
 * by bits 23 and up of CP0_CONTEXT (presumably set up at context
 * switch -- confirm against the pgd_current definition); the UP
 * variant just loads the single pgd_current word.  `scratch' is
 * clobbered in the SMP variant only.
 */
#ifdef CONFIG_SMP
#define GET_PGD(scratch, ptr)        \
	mfc0    ptr, CP0_CONTEXT;    \
	la      scratch, pgd_current;\
	srl     ptr, 23;             \
	sll     ptr, 2;              \
	addu    ptr, scratch, ptr;   \
	lw      ptr, (ptr);
#else
#define GET_PGD(scratch, ptr)    \
	lw	ptr, pgd_current;
#endif
68
/*
 * LOAD_PTE(pte, ptr): two-level page-table walk for the address in
 * CP0_BADVADDR.  On exit `pte' holds the pte value and `ptr' points
 * at its slot in the pte page; both registers are clobbered.
 */
#define LOAD_PTE(pte, ptr) \
	GET_PGD(pte, ptr)          \
	mfc0	pte, CP0_BADVADDR; \
	srl	pte, pte, _PGDIR_SHIFT; \
	sll	pte, pte, _PGD_T_LOG2; \
	addu	ptr, ptr, pte; \
	mfc0	pte, CP0_BADVADDR; \
	lw	ptr, (ptr); \
	srl	pte, pte, PTE_INDX_SHIFT; \
	and	pte, pte, PTE_INDX_MSK; \
	addu	ptr, ptr, pte; \
	PTE_L	pte, (ptr);
81
	/* This places the even/odd pte pair in the page
	 * table at PTR into ENTRYLO0 and ENTRYLO1 using
	 * TMP as a scratch register.  The ori/xori pair
	 * rounds PTR down to the even pte of the pair,
	 * and the right shift by 6 converts a software
	 * pte into EntryLo register format.
	 */
#define PTE_RELOAD(ptr, tmp) \
	ori	ptr, ptr, _PTE_T_SIZE; \
	xori	ptr, ptr, _PTE_T_SIZE; \
	PTE_L	tmp, _PTE_T_SIZE(ptr); \
	PTE_L	ptr, 0(ptr); \
	PTE_SRL	tmp, tmp, 6; \
	P_MTC0	tmp, CP0_ENTRYLO1; \
	PTE_SRL	ptr, ptr, 6; \
	P_MTC0	ptr, CP0_ENTRYLO0;
95
	/* Punt to the C fault handler: save a full register frame,
	 * call do_page_fault(regs, write, badvaddr) with a0 = pt_regs
	 * pointer (sp), a1 = write flag, a2 = faulting address, and
	 * leave through ret_from_exception -- control never returns
	 * to the instruction following this macro.
	 */
#define DO_FAULT(write) \
	.set	noat; \
	SAVE_ALL; \
	mfc0	a2, CP0_BADVADDR; \
	KMODE; \
	.set	at; \
	move	a0, sp; \
	jal	do_page_fault; \
	 li	a1, write; \
	j	ret_from_exception; \
	 nop; \
	.set	noat;
108
	/* Check if PTE is present (and readable), if not then jump to
	 * LABEL.  PTR points to the page table slot where this PTE is
	 * located; when the macro is done executing PTE will be restored
	 * with its original value (reloaded in the branch delay slot,
	 * so the reload happens on both paths).
	 */
#define PTE_PRESENT(pte, ptr, label) \
	andi	pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
	xori	pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
	bnez	pte, label; \
	 PTE_L	pte, (ptr);
119
	/* Make PTE valid (and mark it accessed), store the result back
	 * to the page table slot at *PTR. */
#define PTE_MAKEVALID(pte, ptr) \
	ori	pte, pte, (_PAGE_VALID | _PAGE_ACCESSED); \
	PTE_S	pte, (ptr);
124
	/* Check if PTE is present and can be written to; if not, branch
	 * to LABEL.  Regardless, restore PTE with the value from *PTR
	 * when done (the reload sits in the branch delay slot, so it
	 * executes on both paths).
	 */
#define PTE_WRITABLE(pte, ptr, label) \
	andi	pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
	xori	pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
	bnez	pte, label; \
	 PTE_L	pte, (ptr);
133
	/* Make PTE writable (hardware dirty bit), update the software
	 * status bits (accessed/modified) as well, then store the
	 * result back to the page table slot at *PTR.
	 */
#define PTE_MAKEWRITE(pte, ptr) \
	ori	pte, pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | \
			   _PAGE_VALID | _PAGE_DIRTY); \
	PTE_S	pte, (ptr);
141
142	__INIT
143
144/*
145 * Different for VR41xx because it supports 1k as smallest page size
146 */
147#ifdef CONFIG_CPU_VR41XX
148#define BASE_VPN_SHIFT	6
149#else
150#define BASE_VPN_SHIFT	4
151#endif
152
153#if (BASE_VPN_SHIFT- PTE_T_LOG2-1) > 0
154#define GET_PTE_OFF(reg)	srl	reg, reg, BASE_VPN_SHIFT-_PTE_T_LOG2-1
155#else
156#define GET_PTE_OFF(reg)
157#endif
158
159
160/*
161 * These handlers much be written in a relocatable manner
162 * because based upon the cpu type an arbitrary one of the
163 * following pieces of code will be copied to the KSEG0
164 * vector location.
165 */
	/* TLB refill, EXL == 0, R4xx0, non-R4600 version.
	 * Walks pgd -> pte page, loads the even/odd pte pair into
	 * EntryLo0/EntryLo1 and writes a random TLB entry.  Uses only
	 * k0/k1, which is all an early refill handler may touch. */
	.set	noreorder
	.set	noat
	LEAF(except_vec0_r4000)
	.set	mips3
	GET_PGD(k0, k1)				# get pgd pointer
	mfc0	k0, CP0_BADVADDR		# Get faulting address
	srl	k0, k0, _PGDIR_SHIFT		# get pgd only bits
	sll	k0, k0, _PGD_T_LOG2		# scale to pgd entry offset
	addu	k1, k1, k0			# add in pgd offset
	mfc0	k0, CP0_CONTEXT			# get context reg
	lw	k1, (k1)			# k1 = pte page base
	GET_PTE_OFF(k0)				# get pte offset
	and	k0, k0, PTEP_INDX_MSK		# mask down to even/odd pair
	addu	k1, k1, k0			# add in offset
	PTE_L	k0, 0(k1)			# get even pte
	PTE_L	k1, _PTE_T_SIZE(k1)		# get odd pte
	PTE_SRL	k0, k0, 6			# convert to entrylo0
	P_MTC0	k0, CP0_ENTRYLO0		# load it
	PTE_SRL	k1, k1, 6			# convert to entrylo1
	P_MTC0	k1, CP0_ENTRYLO1		# load it
	/* NOTE(review): under .set noreorder the tlbwr below ends up in
	 * the branch delay slot if rm9000_tlb_hazard expands to no
	 * instructions; confirm against <asm/war.h> what the macro
	 * emits when the RM9000 workaround is enabled. */
	b	1f
	rm9000_tlb_hazard
	tlbwr					# write random tlb entry
1:
	nop
	rm9000_tlb_hazard
	eret					# return from trap
	END(except_vec0_r4000)
195
	/* TLB refill, EXL == 0, R4600 version.  Same table walk as the
	 * R4000 flavour, but with plain nops around tlbwr instead of the
	 * branch trick -- presumably the R4600 pipeline requirement;
	 * confirm against the R4600 errata. */
	LEAF(except_vec0_r4600)
	.set	mips3
	GET_PGD(k0, k1)				# get pgd pointer
	mfc0	k0, CP0_BADVADDR		# get faulting address
	srl	k0, k0, _PGDIR_SHIFT		# get pgd only bits
	sll	k0, k0, _PGD_T_LOG2		# scale to pgd entry offset
	addu	k1, k1, k0			# add in pgd offset
	mfc0	k0, CP0_CONTEXT			# get context reg
	lw	k1, (k1)			# k1 = pte page base
	GET_PTE_OFF(k0)				# get pte offset
	and	k0, k0, PTEP_INDX_MSK		# mask down to even/odd pair
	addu	k1, k1, k0			# add in offset
	PTE_L	k0, 0(k1)			# get even pte
	PTE_L	k1, _PTE_T_SIZE(k1)		# get odd pte
	PTE_SRL	k0, k0, 6			# convert to entrylo0
	P_MTC0	k0, CP0_ENTRYLO0		# load it
	PTE_SRL	k1, k1, 6			# convert to entrylo1
	P_MTC0	k1, CP0_ENTRYLO1		# load it
	nop					# hazard padding before tlbwr
	tlbwr					# write random tlb entry
	nop					# hazard padding before eret
	eret					# return from trap
	END(except_vec0_r4600)
220
	/* TLB refill, EXL == 0, R52x0 "Nevada" version */
        /*
         * This version has a bug workaround for the Nevada.  It seems
         * as if under certain circumstances the move from cp0_context
         * might produce a bogus result when the mfc0 instruction and
         * its consumer are in a different cacheline or a load instruction,
         * probably any memory reference, is between them.  This is
         * potentially slower than the R4000 version, so we use this
         * special version.
         */
	/* NOTE(review): this variant loads pgd_current directly instead
	 * of going through GET_PGD, so it ignores the per-cpu SMP
	 * indexing -- presumably Nevada systems are UP only; confirm. */
	.set	noreorder
	.set	noat
	LEAF(except_vec0_nevada)
	.set	mips3
	mfc0	k0, CP0_BADVADDR		# Get faulting address
	srl	k0, k0, _PGDIR_SHIFT		# get pgd only bits
	lw	k1, pgd_current			# get pgd pointer
	sll	k0, k0, _PGD_T_LOG2		# scale to pgd entry offset
	addu	k1, k1, k0			# add in pgd offset
	lw	k1, (k1)			# k1 = pte page base
	mfc0	k0, CP0_CONTEXT			# get context reg
	GET_PTE_OFF(k0)				# get pte offset
	and	k0, k0, PTEP_INDX_MSK		# mask down to even/odd pair
	addu	k1, k1, k0			# add in offset
	PTE_L	k0, 0(k1)			# get even pte
	PTE_L	k1, _PTE_T_SIZE(k1)		# get odd pte
	PTE_SRL	k0, k0, 6			# convert to entrylo0
	P_MTC0	k0, CP0_ENTRYLO0		# load it
	PTE_SRL	k1, k1, 6			# convert to entrylo1
	P_MTC0	k1, CP0_ENTRYLO1		# load it
	nop					# QED specified nops
	nop
	tlbwr					# write random tlb entry
	nop					# traditional nop
	eret					# return from trap
	END(except_vec0_nevada)
257
	/* TLB refill, EXL == 0, SB1 with M3 errata handling version */
	LEAF(except_vec0_sb1)
#if BCM1250_M3_WAR
	/* BCM1250 M3 workaround: if BadVAddr and EntryHi disagree on the
	 * virtual page number, this is presumably the spurious refill the
	 * erratum describes and a bare eret suffices -- TODO confirm
	 * against the SiByte errata documentation. */
	mfc0	k0, CP0_BADVADDR
	mfc0	k1, CP0_ENTRYHI
	xor	k0, k1				# compare the two VPNs
	srl	k0, k0, PAGE_SHIFT+1		# discard in-page bits
	bnez	k0, 1f				# mismatch -> just eret
#endif
	GET_PGD(k0, k1)				# get pgd pointer
	mfc0	k0, CP0_BADVADDR		# Get faulting address
	srl	k0, k0, _PGDIR_SHIFT		# get pgd only bits
	sll	k0, k0, _PGD_T_LOG2		# scale to pgd entry offset
	addu	k1, k1, k0			# add in pgd offset
	mfc0	k0, CP0_CONTEXT			# get context reg
	lw	k1, (k1)			# k1 = pte page base
	GET_PTE_OFF(k0)				# get pte offset
	and	k0, k0, PTEP_INDX_MSK		# mask down to even/odd pair
	addu	k1, k1, k0			# add in offset
	PTE_L	k0, 0(k1)			# get even pte
	PTE_L	k1, _PTE_T_SIZE(k1)		# get odd pte
	PTE_SRL	k0, k0, 6			# convert to entrylo0
	P_MTC0	k0, CP0_ENTRYLO0		# load it
	PTE_SRL	k1, k1, 6			# convert to entrylo1
	P_MTC0	k1, CP0_ENTRYLO1		# load it
	tlbwr					# write random tlb entry
1:	eret					# return from trap
	END(except_vec0_sb1)
286
	/* TLB refill, EXL == 0, R4[40]00/R5000 badvaddr hwbug version.
	 * Probes the TLB and only writes a new random entry when the
	 * probe misses -- presumably to avoid creating a duplicate
	 * entry when the hardware reports a bogus BadVAddr; confirm
	 * against the R4400/R5000 errata. */
	LEAF(except_vec0_r45k_bvahwbug)
	.set	mips3
	GET_PGD(k0, k1)				# get pgd pointer
	mfc0	k0, CP0_BADVADDR		# get faulting address
	srl	k0, k0, _PGDIR_SHIFT		# get pgd only bits
	sll	k0, k0, _PGD_T_LOG2		# scale to pgd entry offset
	addu	k1, k1, k0			# add in pgd offset
	mfc0	k0, CP0_CONTEXT			# get context reg
	lw	k1, (k1)			# k1 = pte page base
#ifndef CONFIG_64BIT_PHYS_ADDR
	srl	k0, k0, 1			# scale context value to 4-byte ptes
#endif
	and	k0, k0, PTEP_INDX_MSK		# mask down to even/odd pair
	addu	k1, k1, k0			# add in offset
	PTE_L	k0, 0(k1)			# get even pte
	PTE_L	k1, _PTE_T_SIZE(k1)		# get odd pte
	nop				/* XXX */
	tlbp					# does a matching entry exist?
	PTE_SRL	k0, k0, 6			# convert to entrylo0
	P_MTC0	k0, CP0_ENTRYLO0		# load it
	PTE_SRL	k1, k1, 6			# convert to entrylo1
	mfc0	k0, CP0_INDEX			# fetch probe result
	P_MTC0	k1, CP0_ENTRYLO1		# load it
	bltzl	k0, 1f				# probe missed (index < 0)?
	tlbwr					# likely slot: write only on miss
1:
	nop
	eret					# return from trap
	END(except_vec0_r45k_bvahwbug)
317
#ifdef CONFIG_SMP
	/* TLB refill, EXL == 0, R4000 MP badvaddr hwbug version.
	 * Identical to the r45k_bvahwbug variant (probe first, write a
	 * random entry only on a probe miss) but built with the SMP
	 * GET_PGD so each cpu walks its own pgd. */
	LEAF(except_vec0_r4k_mphwbug)
	.set	mips3
	GET_PGD(k0, k1)				# get pgd pointer
	mfc0	k0, CP0_BADVADDR		# get faulting address
	srl	k0, k0, _PGDIR_SHIFT		# get pgd only bits
	sll	k0, k0, _PGD_T_LOG2		# scale to pgd entry offset
	addu	k1, k1, k0			# add in pgd offset
	mfc0	k0, CP0_CONTEXT			# get context reg
	lw	k1, (k1)			# k1 = pte page base
#ifndef CONFIG_64BIT_PHYS_ADDR
	srl	k0, k0, 1			# scale context value to 4-byte ptes
#endif
	and	k0, k0, PTEP_INDX_MSK		# mask down to even/odd pair
	addu	k1, k1, k0			# add in offset
	PTE_L	k0, 0(k1)			# get even pte
	PTE_L	k1, _PTE_T_SIZE(k1)		# get odd pte
	nop				/* XXX */
	tlbp					# does a matching entry exist?
	PTE_SRL	k0, k0, 6			# convert to entrylo0
	P_MTC0	k0, CP0_ENTRYLO0		# load it
	PTE_SRL	k1, k1, 6			# convert to entrylo1
	mfc0	k0, CP0_INDEX			# fetch probe result
	P_MTC0	k1, CP0_ENTRYLO1		# load it
	bltzl	k0, 1f				# probe missed (index < 0)?
	tlbwr					# likely slot: write only on miss
1:
	nop
	eret					# return from trap
	END(except_vec0_r4k_mphwbug)
#endif
350
	/* TLB refill, EXL == 0, R4000 UP 250MHZ entrylo[01] hwbug version.
	 * Each EntryLo register is written with zero before the real
	 * value -- the 250MHz parts' entrylo hwbug workaround. */
	LEAF(except_vec0_r4k_250MHZhwbug)
	.set	mips3
	GET_PGD(k0, k1)				# get pgd pointer
	mfc0	k0, CP0_BADVADDR		# get faulting address
	srl	k0, k0, _PGDIR_SHIFT		# get pgd only bits
	sll	k0, k0, _PGD_T_LOG2		# scale to pgd entry offset
	addu	k1, k1, k0			# add in pgd offset
	mfc0	k0, CP0_CONTEXT			# get context reg
	lw	k1, (k1)			# k1 = pte page base
#ifndef CONFIG_64BIT_PHYS_ADDR
	srl	k0, k0, 1			# scale context value to 4-byte ptes
#endif
	and	k0, k0, PTEP_INDX_MSK		# mask down to even/odd pair
	addu	k1, k1, k0			# add in offset
	PTE_L	k0, 0(k1)			# get even pte
	PTE_L	k1, _PTE_T_SIZE(k1)		# get odd pte
	PTE_SRL	k0, k0, 6			# convert to entrylo0
	P_MTC0	zero, CP0_ENTRYLO0		# clear first (hwbug workaround)
	P_MTC0	k0, CP0_ENTRYLO0		# then load the real value
	PTE_SRL	k1, k1, 6			# convert to entrylo1
	P_MTC0	zero, CP0_ENTRYLO1		# clear first (hwbug workaround)
	P_MTC0	k1, CP0_ENTRYLO1		# then load the real value
	b	1f
	tlbwr					# write random entry (delay slot)
1:
	nop
	eret					# return from trap
	END(except_vec0_r4k_250MHZhwbug)
380
#ifdef CONFIG_SMP
	/* TLB refill, EXL == 0, R4000 MP 250MHZ entrylo[01]+badvaddr bug version.
	 * Combines both workarounds: EntryLo registers are zeroed before
	 * the real write, and the random write only happens when the
	 * tlbp probe missed (avoiding a duplicate entry). */
	LEAF(except_vec0_r4k_MP250MHZhwbug)
	.set	mips3
	GET_PGD(k0, k1)				# get pgd pointer
	mfc0	k0, CP0_BADVADDR		# get faulting address
	srl	k0, k0, _PGDIR_SHIFT		# get pgd only bits
	sll	k0, k0, _PGD_T_LOG2		# scale to pgd entry offset
	addu	k1, k1, k0			# add in pgd offset
	mfc0	k0, CP0_CONTEXT			# get context reg
	lw	k1, (k1)			# k1 = pte page base
#ifndef CONFIG_64BIT_PHYS_ADDR
	srl	k0, k0, 1			# scale context value to 4-byte ptes
#endif
	and	k0, k0, PTEP_INDX_MSK		# mask down to even/odd pair
	addu	k1, k1, k0			# add in offset
	PTE_L	k0, 0(k1)			# get even pte
	PTE_L	k1, _PTE_T_SIZE(k1)		# get odd pte
	nop				/* XXX */
	tlbp					# does a matching entry exist?
	PTE_SRL	k0, k0, 6			# convert to entrylo0
	P_MTC0  zero, CP0_ENTRYLO0		# clear first (hwbug workaround)
	P_MTC0  k0, CP0_ENTRYLO0		# then load the real value
	mfc0    k0, CP0_INDEX			# fetch probe result
	PTE_SRL	k1, k1, 6			# convert to entrylo1
	P_MTC0	zero, CP0_ENTRYLO1		# clear first (hwbug workaround)
	P_MTC0	k1, CP0_ENTRYLO1		# then load the real value
	bltzl	k0, 1f				# probe missed (index < 0)?
	tlbwr					# likely slot: write only on miss
1:
	nop
	eret					# return from trap
	END(except_vec0_r4k_MP250MHZhwbug)
#endif
415
416	__FINIT
417
418	.set	noreorder
419
420/*
421 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
422 * 2. A timing hazard exists for the TLBP instruction.
423 *
424 *      stalling_instruction
425 *      TLBP
426 *
427 * The JTLB is being read for the TLBP throughout the stall generated by the
428 * previous instruction. This is not really correct as the stalling instruction
429 * can modify the address used to access the JTLB.  The failure symptom is that
430 * the TLBP instruction will use an address created for the stalling instruction
431 * and not the address held in C0_ENHI and thus report the wrong results.
432 *
433 * The software work-around is to not allow the instruction preceding the TLBP
434 * to stall - make it an NOP or some other instruction guaranteed not to stall.
435 *
436 * Errata 2 will not be fixed.  This errata is also on the R5000.
437 *
438 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
439 */
440#define R5K_HAZARD nop
441
442	/*
443	 * Note for many R4k variants tlb probes cannot be executed out
444	 * of the instruction cache else you get bogus results.
445	 */
	.align	5
	/*
	 * TLB invalid (load/ifetch) exception: a matching TLB entry
	 * exists but is not valid.  If the pte is actually present and
	 * readable, set valid/accessed and rewrite the existing TLB
	 * entry in place; otherwise take the slow path into C with
	 * write == 0.
	 */
	NESTED(handle_tlbl, PT_SIZE, sp)
	.set	noat
#if BCM1250_M3_WAR
	/* BCM1250 M3 workaround: on a VPN mismatch between BadVAddr and
	 * EntryHi just return -- presumably a spurious exception; see
	 * the SiByte errata. */
	mfc0	k0, CP0_BADVADDR
	mfc0	k1, CP0_ENTRYHI
	xor	k0, k1
	srl	k0, k0, PAGE_SHIFT+1
	beqz	k0, 1f				# VPNs agree: handle normally
	 nop
	.set	mips3
	eret
	.set	mips0
1:
#endif
invalid_tlbl:
#ifdef TLB_OPTIMIZE
	.set	mips3
	/* Test present bit in entry. */
	LOAD_PTE(k0, k1)			# k0 = pte, k1 = pte slot ptr
	R5K_HAZARD				# see the Nevada TLBP erratum above
	tlbp					# set CP0_INDEX for the tlbwi
	PTE_PRESENT(k0, k1, nopage_tlbl)	# not present/readable -> C path
	PTE_MAKEVALID(k0, k1)			# set valid+accessed, store back
	PTE_RELOAD(k1, k0)			# load entrylo0/1 from pte pair
	rm9000_tlb_hazard
	nop
	b	1f
	 tlbwi					# rewrite indexed entry (delay slot)
1:
	nop
	rm9000_tlb_hazard
	.set	mips3
	eret
	.set	mips0
#endif

nopage_tlbl:
	DO_FAULT(0)				# do_page_fault, write == 0
	END(handle_tlbl)
486
487	.align	5
488	NESTED(handle_tlbs, PT_SIZE, sp)
489	.set	noat
490#ifdef TLB_OPTIMIZE
491	.set	mips3
492        li      k0,0
493	LOAD_PTE(k0, k1)
494	R5K_HAZARD
495	tlbp				# find faulting entry
496	PTE_WRITABLE(k0, k1, nopage_tlbs)
497	PTE_MAKEWRITE(k0, k1)
498	PTE_RELOAD(k1, k0)
499	rm9000_tlb_hazard
500	nop
501	b	1f
502	 tlbwi
5031:
504	nop
505	rm9000_tlb_hazard
506	.set	mips3
507	eret
508	.set	mips0
509#endif
510
511nopage_tlbs:
512	DO_FAULT(1)
513	END(handle_tlbs)
514
	.align	5
	/*
	 * TLB modified exception: a store hit a valid TLB entry whose
	 * dirty bit is clear.  If the pte permits writes, set the
	 * dirty/accessed bits and rewrite the TLB entry; otherwise this
	 * is a genuine protection fault and goes to C with write == 1.
	 */
	NESTED(handle_mod, PT_SIZE, sp)
	.set	noat
#ifdef TLB_OPTIMIZE
	.set	mips3
	LOAD_PTE(k0, k1)			# k0 = pte, k1 = pte slot ptr
	R5K_HAZARD				# see the Nevada TLBP erratum above
	tlbp					# find faulting entry
	andi	k0, k0, _PAGE_WRITE		# write permitted?
	beqz	k0, nowrite_mod			# no -> protection fault
	 PTE_L	k0, (k1)			# reload pte (delay slot)

	/* Present and writable bits set, set accessed and dirty bits. */
	PTE_MAKEWRITE(k0, k1)

	/* Now reload the entry into the tlb. */
	PTE_RELOAD(k1, k0)
	rm9000_tlb_hazard
	nop
	b	1f
	 tlbwi					# rewrite indexed entry (delay slot)
1:
	rm9000_tlb_hazard
	nop
	.set	mips3
	eret
	.set	mips0
#endif

nowrite_mod:
	DO_FAULT(1)				# do_page_fault, write == 1
	END(handle_mod)
547