/* $Id: pgtable.h,v 1.154 2001/12/05 06:05:36 davem Exp $
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/processor.h>

/* The kernel image occupies 0x400000 to 0x1000000 (4MB --> 16MB).
 * The page copy blockops use 0x1000000 to 0x1800000 (16MB --> 24MB).
 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
 * The vmalloc area spans 0x140000000 to 0x200000000.
 * There is a single static kernel PMD which maps from 0x0 to address
 * 0x400000000.
 */
#define	TLBTEMP_BASE		0x0000000001000000
#define MODULES_VADDR		0x0000000002000000
#define MODULES_LEN		0x000000007e000000
#define MODULES_END		0x0000000080000000
#define VMALLOC_START		0x0000000140000000
#define VMALLOC_VMADDR(x)	((unsigned long)(x))
#define VMALLOC_END		0x0000000200000000
#define LOW_OBP_ADDRESS		0x00000000f0000000
#define HI_OBP_ADDRESS		0x0000000100000000

/* XXX All of this needs to be rethought so we can take advantage
 * XXX of cheetah's full 64-bit virtual address space, i.e. no more hole
 * XXX in the middle like on spitfire. -DaveM
 */
/*
 * Given a virtual address, the lowest PAGE_SHIFT bits determine the offset
 * into the page; the next higher PAGE_SHIFT-3 bits determine the pte#
 * in the proper pagetable (the -3 comes from the 8-byte ptes, and each page
 * table is a single page long).  The next higher PMD_BITS determine pmd#
 * in the proper pmdtable (where we must have PMD_BITS <= (PAGE_SHIFT-2)
 * since the pmd entries are 4 bytes, and each pmd page is a single page
 * long).  Finally, the higher few bits determine pgde#.
 */

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PMD_BITS	11

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
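
/* Worked example (illustrative, assuming the default 8K pages, i.e.
 * PAGE_SHIFT == 13):
 *
 *	PMD_SHIFT   = 13 + (13 - 3)      = 23, so one pmd entry maps
 *	PMD_SIZE    = 1UL << 23  (8MB) of virtual space;
 *	PGDIR_SHIFT = 13 + (13 - 3) + 11 = 34, so one pgd entry maps
 *	PGDIR_SIZE  = 1UL << 34  (16GB) of virtual space.
 */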

#ifndef __ASSEMBLY__

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* Entries per page directory level. */
#define PTRS_PER_PTE		(1UL << (PAGE_SHIFT-3))

/* We keep the real entry count in REAL_PTRS_PER_PMD; what we export to
 * the kernel is different so we can optimize correctly for 32-bit tasks.
 */
#define REAL_PTRS_PER_PMD	(1UL << PMD_BITS)
#define PTRS_PER_PMD		((const int)((current->thread.flags & SPARC_FLAG_32BIT) ? \
				 (1UL << (32 - (PAGE_SHIFT-3) - PAGE_SHIFT)) : (REAL_PTRS_PER_PMD)))
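
/* Worked example (illustrative, PAGE_SHIFT == 13): a 32-bit task needs
 * only 32 - (13 - 3) - 13 = 9 bits of pmd index, so such tasks see
 * PTRS_PER_PMD as 1 << 9 = 512 instead of the real
 * REAL_PTRS_PER_PMD = 1 << 11 = 2048.
 */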

/*
 * We cannot use the top address range because the VPTE table lives there.
 * This formula finds the total legal virtual space in the processor,
 * subtracts the vpte size, then aligns it to the number of bytes mapped by
 * one pgde, and thus calculates the number of pgdes needed.
 */
#define PTRS_PER_PGD	(((1UL << VA_BITS) - VPTE_SIZE + (1UL << (PAGE_SHIFT + \
			(PAGE_SHIFT-3) + PMD_BITS)) - 1) / (1UL << (PAGE_SHIFT + \
			(PAGE_SHIFT-3) + PMD_BITS)))
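
/* Worked example (illustrative; assumes PAGE_SHIFT == 13, VA_BITS == 44,
 * and a VPTE table of 1UL << (VA_BITS - PAGE_SHIFT + 3) bytes): one pgde
 * maps 1UL << 34 bytes and the VPTE table costs 1UL << 34 bytes of
 * virtual space, so
 *
 *	PTRS_PER_PGD = ((1UL << 44) - (1UL << 34) + (1UL << 34) - 1) >> 34
 *	             = 1023
 */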

/* The kernel has a separate 44-bit address space. */
#define USER_PTRS_PER_PGD	((const int)((current->thread.flags & SPARC_FLAG_32BIT) ? \
				 (1) : (PTRS_PER_PGD)))
#define FIRST_USER_PGD_NR	0
#define pte_ERROR(e)	__builtin_trap()
#define pmd_ERROR(e)	__builtin_trap()
#define pgd_ERROR(e)	__builtin_trap()

#endif /* !(__ASSEMBLY__) */

/* Spitfire/Cheetah TTE bits. */
#define _PAGE_VALID	0x8000000000000000	/* Valid TTE                 */
#define _PAGE_R		0x8000000000000000	/* Keep ref bit up to date   */
#define _PAGE_SZ4MB	0x6000000000000000	/* 4MB Page                  */
#define _PAGE_SZ512K	0x4000000000000000	/* 512K Page                 */
#define _PAGE_SZ64K	0x2000000000000000	/* 64K Page                  */
#define _PAGE_SZ8K	0x0000000000000000	/* 8K Page                   */
#define _PAGE_NFO	0x1000000000000000	/* No Fault Only             */
#define _PAGE_IE	0x0800000000000000	/* Invert Endianness         */
#define _PAGE_SOFT2	0x07FC000000000000	/* Software bits, set 2      */
#define _PAGE_RES1	0x0003000000000000	/* Reserved                  */
#define _PAGE_SN	0x0000800000000000	/* (Cheetah) Snoop           */
#define _PAGE_RES2	0x0000780000000000	/* Reserved                  */
#define _PAGE_PADDR_SF	0x000001FFFFFFE000	/* (Spitfire) paddr[40:13]   */
#define _PAGE_PADDR	0x000007FFFFFFE000	/* (Cheetah) paddr[42:13]    */
#define _PAGE_SOFT	0x0000000000001F80	/* Software bits             */
#define _PAGE_L		0x0000000000000040	/* Locked TTE                */
#define _PAGE_CP	0x0000000000000020	/* Cacheable in P-Cache      */
#define _PAGE_CV	0x0000000000000010	/* Cacheable in V-Cache      */
#define _PAGE_E		0x0000000000000008	/* side-Effect               */
#define _PAGE_P		0x0000000000000004	/* Privileged Page           */
#define _PAGE_W		0x0000000000000002	/* Writable                  */
#define _PAGE_G		0x0000000000000001	/* Global                    */

/* Here are the SpitFire software bits we use in the TTE's. */
#define _PAGE_EXEC	0x0000000000001000	/* Executable SW bit         */
#define _PAGE_MODIFIED	0x0000000000000800	/* Modified Page (ie. dirty) */
#define _PAGE_ACCESSED	0x0000000000000400	/* Accessed Page (ie. ref'd) */
#define _PAGE_READ	0x0000000000000200	/* Readable SW Bit           */
#define _PAGE_WRITE	0x0000000000000100	/* Writable SW Bit           */
#define _PAGE_PRESENT	0x0000000000000080	/* Present                   */

#if PAGE_SHIFT == 13
#define _PAGE_SZBITS	_PAGE_SZ8K
#elif PAGE_SHIFT == 16
#define _PAGE_SZBITS	_PAGE_SZ64K
#elif PAGE_SHIFT == 19
#define _PAGE_SZBITS	_PAGE_SZ512K
#elif PAGE_SHIFT == 22
#define _PAGE_SZBITS	_PAGE_SZ4MB
#else
#error Wrong PAGE_SHIFT specified
#endif

#define _PAGE_CACHE	(_PAGE_CP | _PAGE_CV)

#define __DIRTY_BITS	(_PAGE_MODIFIED | _PAGE_WRITE | _PAGE_W)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_READ | _PAGE_R)
#define __PRIV_BITS	_PAGE_P
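
/* Each group pairs the software state bits with the hardware TTE bit
 * that enforces them: a pte only becomes hardware-writable (_PAGE_W)
 * once it has been marked dirty, and only hardware-referenced (_PAGE_R)
 * once it has been marked accessed, so the first write or reference
 * traps and lets us update the software bits.
 */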

#define PAGE_NONE	__pgprot (_PAGE_PRESENT | _PAGE_ACCESSED)

/* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */
#define PAGE_SHARED	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
				  __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC)

#define PAGE_COPY	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
				  __ACCESS_BITS | _PAGE_EXEC)

#define PAGE_READONLY	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
				  __ACCESS_BITS | _PAGE_EXEC)

#define PAGE_KERNEL	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
				  __PRIV_BITS | \
				  __ACCESS_BITS | __DIRTY_BITS | _PAGE_EXEC)

#define PAGE_SHARED_NOEXEC	__pgprot (_PAGE_PRESENT | _PAGE_VALID | \
					  _PAGE_CACHE | \
					  __ACCESS_BITS | _PAGE_WRITE)

#define PAGE_COPY_NOEXEC	__pgprot (_PAGE_PRESENT | _PAGE_VALID | \
					  _PAGE_CACHE | __ACCESS_BITS)

#define PAGE_READONLY_NOEXEC	__pgprot (_PAGE_PRESENT | _PAGE_VALID | \
					  _PAGE_CACHE | __ACCESS_BITS)

#define PAGE_INVALID	__pgprot (0)

#define _PFN_MASK	_PAGE_PADDR

#define _PAGE_CHG_MASK	(_PFN_MASK | _PAGE_MODIFIED | _PAGE_ACCESSED | _PAGE_PRESENT | _PAGE_SZBITS)

#define pg_iobits (_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | __ACCESS_BITS | _PAGE_E)

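/* Protection tables, indexed by the PROT_* bits of an mmap request;
 * reading the three digits left to right gives the PROT_EXEC,
 * PROT_WRITE and PROT_READ bits of the index.  __P* is for private
 * (copy-on-write) mappings, __S* for shared ones.
 */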
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_NOEXEC
#define __P010	PAGE_COPY_NOEXEC
#define __P011	PAGE_COPY_NOEXEC
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_NOEXEC
#define __S010	PAGE_SHARED_NOEXEC
#define __S011	PAGE_SHARED_NOEXEC
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

#ifndef __ASSEMBLY__

extern unsigned long phys_base;

extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr)	(mem_map_zero)

/* Warning: These take pointers to page structs now... */
#define mk_pte(page, pgprot)		\
	__pte((((page - mem_map) << PAGE_SHIFT)+phys_base) | pgprot_val(pgprot) | _PAGE_SZBITS)
#define page_pte_prot(page, prot)	mk_pte(page, prot)
#define page_pte(page)			page_pte_prot(page, __pgprot(0))

#define mk_pte_phys(physpage, pgprot)	(__pte((physpage) | pgprot_val(pgprot) | _PAGE_SZBITS))
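
/* Example: building the pte for a freshly allocated page with kernel
 * protections (a minimal sketch):
 *
 *	struct page *pg = alloc_page(GFP_KERNEL);
 *	pte_t pte = mk_pte(pg, PAGE_KERNEL);
 *
 * mk_pte() turns the page struct back into a physical address via its
 * mem_map offset before merging in the protection bits.
 */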

extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
{
	pte_t __pte;

	pte_val(__pte) = (pte_val(orig_pte) & _PAGE_CHG_MASK) |
		pgprot_val(new_prot);

	return __pte;
}
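
/* pmd and pgd entries are only 32 bits wide, so rather than a full
 * physical address they hold the address shifted right by 11; the page
 * tables are at least 2K-aligned, so no low bits are lost.
 */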
#define pmd_set(pmdp, ptep)	\
	(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
#define pgd_set(pgdp, pmdp)	\
	(pgd_val(*(pgdp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
#define pmd_page(pmd)		\
	((unsigned long) __va(((unsigned long)pmd_val(pmd))<<11UL))
#define pgd_page(pgd)		\
	((unsigned long) __va(((unsigned long)pgd_val(pgd))<<11UL))
#define pte_none(pte) 			(!pte_val(pte))
#define pte_present(pte)		(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(pte)			(pte_val(*(pte)) = 0UL)
#define pmd_none(pmd)			(!pmd_val(pmd))
#define pmd_bad(pmd)			(0)
#define pmd_present(pmd)		(pmd_val(pmd) != 0U)
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0U)
#define pgd_none(pgd)			(!pgd_val(pgd))
#define pgd_bad(pgd)			(0)
#define pgd_present(pgd)		(pgd_val(pgd) != 0U)
#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0U)

/* The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_exec(pte)		pte_read(pte)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~(_PAGE_WRITE|_PAGE_W)))
#define pte_rdprotect(pte)	(__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_READ))
#define pte_mkclean(pte)	(__pte(pte_val(pte) & ~(_PAGE_MODIFIED|_PAGE_W)))
#define pte_mkold(pte)		(__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED))

/* Permanent address of a page. */
#define __page_address(page)	page_address(page)

#define pte_page(x) (mem_map+(((pte_val(x)&_PAGE_PADDR)-phys_base)>>PAGE_SHIFT))

/* Be very careful when you change these three, they are delicate.
 * pte_mkyoung()/pte_mkdirty() must raise the hardware _PAGE_R/_PAGE_W
 * bits along with the software ones, while pte_mkwrite() deliberately
 * leaves _PAGE_W clear so the first write still faults and sets the
 * dirty bit.
 */
#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R))
#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_WRITE))
#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD))
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
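/* Note that with the default 8K pages PTRS_PER_PGD works out to 0x3ff,
 * one less than a power of two, so it can serve directly as the index
 * mask in pgd_index() above.
 */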

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, address)	((pmd_t *) pgd_page(*(dir)) + \
					((address >> PMD_SHIFT) & (REAL_PTRS_PER_PMD-1)))

/* Find an entry in the third-level page table.. */
#define pte_offset(dir, address)	((pte_t *) pmd_page(*(dir)) + \
					((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

extern pgd_t swapper_pg_dir[1];

/* These do nothing with the way I have things setup. */
#define mmu_lockarea(vaddr, len)		(vaddr)
#define mmu_unlockarea(vaddr, len)		do { } while(0)

extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

#define flush_icache_page(vma, pg)	do { } while(0)
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

/* Make a non-present pseudo-TTE. */
extern inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
{
	pte_t pte;
	pte_val(pte) = ((page) | pgprot_val(prot) | _PAGE_E) & ~(unsigned long)_PAGE_CACHE;
	pte_val(pte) |= (((unsigned long)space) << 32);
	return pte;
}
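
/* Example: a pseudo-TTE for a device page (a minimal sketch; paddr and
 * space stand in for a bus address and I/O space number):
 *
 *	pte_t pte = mk_pte_io(paddr, __pgprot(pg_iobits), space);
 *
 * The space number lands in bits 32 and up, the side-effect bit is
 * forced on, and cacheability is forced off.
 */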

/* Encode and de-code a swap entry */
#define SWP_TYPE(entry)		(((entry).val >> PAGE_SHIFT) & 0xffUL)
#define SWP_OFFSET(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define SWP_ENTRY(type, offset)	\
	( (swp_entry_t) \
	  { \
		(((long)(type) << PAGE_SHIFT) | \
		 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
	  } )
#define pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)		((pte_t) { (x).val })
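
/* Swap-pte layout (illustrative, PAGE_SHIFT == 13): the low 13 bits
 * stay clear -- including bit 7, _PAGE_PRESENT, so the pte tests as not
 * present -- the swap type sits in bits 20..13 and the swap offset
 * starts at bit 21.
 */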

extern unsigned long prom_virt_to_phys(unsigned long, int *);

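/* Translate a kernel virtual address to the physical address of its
 * page: linearly mapped addresses translate directly, OBP addresses are
 * resolved via the PROM's translations, and everything else is looked
 * up in the kernel page tables.
 */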
extern __inline__ unsigned long
sun4u_get_pte (unsigned long addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (addr >= PAGE_OFFSET)
		return addr & _PAGE_PADDR;
	if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
		return prom_virt_to_phys(addr, 0);
	pgdp = pgd_offset_k (addr);
	pmdp = pmd_offset (pgdp, addr);
	ptep = pte_offset (pmdp, addr);
	return pte_val (*ptep) & _PAGE_PADDR;
}

extern __inline__ unsigned long
__get_phys (unsigned long addr)
{
	return sun4u_get_pte (addr);
}

extern __inline__ int
__get_iospace (unsigned long addr)
{
	return ((sun4u_get_pte (addr) & 0xf0000000) >> 28);
}

extern unsigned long *sparc64_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	\
	(test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap))
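/* Each bit in sparc64_valid_addr_bitmap covers one 4MB (1 << 22) chunk
 * of physical memory, hence the >> 22 above.
 */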

extern int io_remap_page_range(unsigned long from, unsigned long offset,
			       unsigned long size, pgprot_t prot, int space);

#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long, unsigned long, unsigned long, unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

#endif /* !(__ASSEMBLY__) */

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC64_PGTABLE_H) */