1/* $Id: dtlb_backend.S,v 1.16 2001/10/09 04:02:11 davem Exp $
2 * dtlb_backend.S: Back end to DTLB miss replacement strategy.
3 *                 This is included directly into the trap table.
4 *
5 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
6 * Copyright (C) 1997,1998 Jakub Jelinek   (jj@ultra.linux.cz)
7 */
8
9#include <asm/pgtable.h>
10
/* Emit the instruction(s) that deposit _PAGE_VALID plus the page-size
 * field of a VPTE into register r1.  BITS1 + BITS2 + the trailing
 * FILL_VALID_SZ_BITS_NOP always occupy exactly two instruction slots,
 * so the trap-table I-cache line layout below is identical for both
 * supported page sizes.
 */
#if PAGE_SHIFT == 13
/* 8K pages: a single shift suffices, and the spare slot at the end of
 * I-cache line 4 is padded with a nop.
 * NOTE(review): relies on %g2 holding 0b10 in its low two bits at this
 * point so that %g2 << 62 == 1 << 63 (just _PAGE_VALID; the 8K size
 * encoding is zero) -- confirm against the trap-entry register setup.
 */
#define FILL_VALID_SZ_BITS1(r1) \
	 sllx		%g2, 62, r1
#define FILL_VALID_SZ_BITS2(r1)
#define FILL_VALID_SZ_BITS_NOP nop
#elif PAGE_SHIFT == 16
/* 64K pages: build the constant 5 << 61 in two instructions -- bit 63
 * (_PAGE_VALID) plus bit 61 (presumably the 64K size encoding; verify
 * against _PAGE_SZ64K in pgtable.h).  Two insns used, so no pad nop.
 */
#define FILL_VALID_SZ_BITS1(r1) \
	or		%g0, 5, r1
#define FILL_VALID_SZ_BITS2(r1) \
	sllx		r1, 61, r1
#define FILL_VALID_SZ_BITS_NOP
#else
#error unsupported PAGE_SIZE
#endif /* PAGE_SHIFT */
25
/* Attribute bits OR'd into every VPTE we synthesize (cacheability and
 * privileged bits from pgtable.h). */
#define VPTE_BITS		(_PAGE_CP | _PAGE_CV | _PAGE_P )
/* Each PTE is 8 bytes, so one page holds 2^(PAGE_SHIFT - 3) of them. */
#define VPTE_SHIFT		(PAGE_SHIFT - 3)
/* Bit position of the PMD index within a VPTE miss offset: the offset
 * is (vaddr >> PAGE_SHIFT) << 3, so the pte-table index occupies bits
 * [3, PAGE_SHIFT) and the PMD index starts right above it. */
#define TLB_PMD_SHIFT		(PAGE_SHIFT - 3 + 3)
/* The PGD index sits PMD_BITS above the PMD index. */
#define TLB_PGD_SHIFT		(PMD_BITS + PAGE_SHIFT - 3 + 3)
/* Pre-shifted left by 1: the handler shifts right by only
 * (TLB_PMD_SHIFT - 1) and later doubles the masked value, yielding
 * index * 4 -- PMD entries are 32 bits (loaded with lduwa below). */
#define TLB_PMD_MASK		(((1 << PMD_BITS) - 1) << 1)
/* Pre-shifted left by 2 to match the (TLB_PGD_SHIFT - 2) right shift,
 * yielding index * 4 directly for the 32-bit PGD entries; the VA_BITS
 * expression is the number of PGD index bits above the PMD/pte levels. */
#define TLB_PGD_MASK		(((1 << (VA_BITS - PAGE_SHIFT - (PAGE_SHIFT - 3) - PMD_BITS)) - 1) << 2)
32
33/* Ways we can get here:
34 *
35 * 1) Nucleus loads and stores to/from PA-->VA direct mappings at tl>1.
36 * 2) Nucleus loads and stores to/from user/kernel window save areas.
37 * 3) VPTE misses from dtlb_base and itlb_base.
38 */
39
/* TLB1 ** ICACHE line 1: tl1 DTLB and quick VPTE miss	*/
	/* Register conventions on entry (established outside this file;
	 * NOTE(review): inferred from the uses and "Restore" comments in
	 * this fragment -- confirm against dtlb_base.S / trap entry code):
	 *   %g1 = TLB_SFSR, so [%g1 + %g1] addresses TAG_ACCESS
	 *   %g3 = VPTE base >> 1 (doubled below to form the real base)
	 *   %g6 = VPTE offset computed by the tl0 miss handler
	 * Misses below the VPTE base fall through to the generic tl1 path.
	 */
	ldxa		[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
	add		%g3, %g3, %g5			! Compute VPTE base
	cmp		%g4, %g5			! VPTE miss?
	bgeu,pt		%xcc, 1f			! Continue here
	 andcc		%g4, TAG_CONTEXT_BITS, %g5	! From Nucleus? (for tl0 miss)
	ba,pt		%xcc, from_tl1_trap		! Fall to tl0 miss
	 rdpr		%tl, %g5			! For tl0 miss TL==3 test
1:	sllx		%g6, VPTE_SHIFT, %g4		! Position TAG_ACCESS
49
/* TLB1 ** ICACHE line 2: Quick VPTE miss	  	*/
	/* %g5 and %xcc still carry the TAG_CONTEXT_BITS result of line 1's
	 * andcc; the or below merges those context bits back into the
	 * reconstructed TAG_ACCESS value, and be,pn consumes the flags. */
	or		%g4, %g5, %g4			! Prepare TAG_ACCESS
	mov		TSB_REG, %g1			! Grab TSB reg
	ldxa		[%g1] ASI_DMMU, %g5		! Doing PGD caching?
	srlx		%g6, (TLB_PMD_SHIFT - 1), %g1	! Position PMD offset
	be,pn		%xcc, sparc64_vpte_nucleus	! Is it from Nucleus?
	 and		%g1, TLB_PMD_MASK, %g1		! Mask PMD offset bits
	brnz,pt		%g5, sparc64_vpte_continue	! Yep, go like smoke
	 add		%g1, %g1, %g1			! Position PMD offset some more (now index * 4)
59
/* TLB1 ** ICACHE line 3: Quick VPTE miss	  	*/
	/* Two-level walk with 32-bit physical loads; table entries are
	 * stored shifted right by 11, hence the sllx after each load.
	 * NOTE(review): %g7 looks like the physical PGD base here --
	 * confirm against the trap-entry register conventions. */
	srlx		%g6, (TLB_PGD_SHIFT - 2), %g5	! Position PGD offset
	and		%g5, TLB_PGD_MASK, %g5		! Mask PGD offset (index * 4)
	lduwa		[%g7 + %g5] ASI_PHYS_USE_EC, %g5! Load PGD
	brz,pn		%g5, vpte_noent			! Valid?
sparc64_kpte_continue:
	 sllx		%g5, 11, %g5			! Shift into place
sparc64_vpte_continue:
	lduwa		[%g5 + %g1] ASI_PHYS_USE_EC, %g5! Load PMD
	sllx		%g5, 11, %g5			! Shift into place
	brz,pn		%g5, vpte_noent			! Valid? (delay slot is line 4's first insn)
71
/* TLB1 ** ICACHE line 4: Quick VPTE miss	  	*/
	/* Synthesize the VPTE (PMD value | VPTE_BITS | valid/size bits)
	 * and insert it into the DTLB -- the insert is tagged with the
	 * current TAG_ACCESS, i.e. the vpte address.  Then TAG_ACCESS is
	 * restored to the original tl0 miss address so the restarted
	 * handler stores its PTE with the correct tag after retry. */
	 FILL_VALID_SZ_BITS1(%g1)			! Put _PAGE_VALID + size bits into %g1 (slot 1)
	FILL_VALID_SZ_BITS2(%g1)			! Put _PAGE_VALID + size bits into %g1 (slot 2, may be empty)
	or		%g5, VPTE_BITS, %g5		! Prepare VPTE data
	or		%g5, %g1, %g5			! ... merge in valid/size bits
	mov		TLB_SFSR, %g1			! Restore %g1 value
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Load VPTE into TLB
	stxa		%g4, [%g1 + %g1] ASI_DMMU	! Restore previous TAG_ACCESS
	retry						! Load PTE once again
	FILL_VALID_SZ_BITS_NOP				! Pads this I-cache line to 8 insns for either page size
82
/* These helpers are local to this fragment; undefine them so they do
 * not leak into the rest of the trap table this file is included into. */
#undef VPTE_SHIFT
#undef TLB_PMD_SHIFT
#undef TLB_PGD_SHIFT
#undef VPTE_BITS
#undef TLB_PMD_MASK
#undef TLB_PGD_MASK
#undef FILL_VALID_SZ_BITS1
#undef FILL_VALID_SZ_BITS2
#undef FILL_VALID_SZ_BITS_NOP
92
93