#ifndef _ASM_IA64_PGTABLE_H
#define _ASM_IA64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the IA-64 page table tree.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>

#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/types.h>

#define IA64_MAX_PHYS_BITS	50	/* max. number of physical address bits (architected) */

/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPT long
 * format, and the first doubleword of the TLB insertion format.
 */
#define _PAGE_P_BIT		0
#define _PAGE_A_BIT		5
#define _PAGE_D_BIT		6

#define _PAGE_P			(1 << _PAGE_P_BIT)	/* page present bit */
#define _PAGE_MA_WB		(0x0 <<  2)	/* write back memory attribute */
#define _PAGE_MA_UC		(0x4 <<  2)	/* uncacheable memory attribute */
#define _PAGE_MA_UCE		(0x5 <<  2)	/* UC exported attribute */
#define _PAGE_MA_WC		(0x6 <<  2)	/* write coalescing memory attribute */
#define _PAGE_MA_NAT		(0x7 <<  2)	/* not-a-thing attribute */
#define _PAGE_MA_MASK		(0x7 <<  2)
#define _PAGE_PL_0		(0 <<  7)	/* privilege level 0 (kernel) */
#define _PAGE_PL_1		(1 <<  7)	/* privilege level 1 (unused) */
#define _PAGE_PL_2		(2 <<  7)	/* privilege level 2 (unused) */
#define _PAGE_PL_3		(3 <<  7)	/* privilege level 3 (user) */
#define _PAGE_PL_MASK		(3 <<  7)
#define _PAGE_AR_R		(0 <<  9)	/* read only */
#define _PAGE_AR_RX		(1 <<  9)	/* read & execute */
#define _PAGE_AR_RW		(2 <<  9)	/* read & write */
#define _PAGE_AR_RWX		(3 <<  9)	/* read, write & execute */
#define _PAGE_AR_R_RW		(4 <<  9)	/* read / read & write */
#define _PAGE_AR_RX_RWX		(5 <<  9)	/* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW		(6 <<  9)	/* read, write & exec / read & write */
#define _PAGE_AR_X_RX		(7 <<  9)	/* exec & promote / read & exec */
#define _PAGE_AR_MASK		(7 <<  9)
#define _PAGE_AR_SHIFT		9
#define _PAGE_A			(1 << _PAGE_A_BIT)	/* page accessed bit */
#define _PAGE_D			(1 << _PAGE_D_BIT)	/* page dirty bit */
#define _PAGE_PPN_MASK		(((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED		(__IA64_UL(1) << 52)	/* exception deferral */
#define _PAGE_PROTNONE		(__IA64_UL(1) << 63)

#define _PFN_MASK		_PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK	(_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)
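
/*
 * Putting the bit definitions above together, a short-format PTE is laid
 * out roughly as follows (a reader's aid only; the bit positions are
 * taken from the defines above, not an additional definition):
 *
 *	bit   0:	P   (present)
 *	bits  2- 4:	MA  (memory attribute)
 *	bit   5:	A   (accessed)
 *	bit   6:	D   (dirty)
 *	bits  7- 8:	PL  (privilege level)
 *	bits  9-11:	AR  (access rights)
 *	bits 12-49:	PPN (physical page number)
 *	bit  52:	ED  (exception deferral)
 *	bit  63:	PROTNONE (software: PROT_NONE page, not present)
 */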

#define _PAGE_SIZE_4K	12
#define _PAGE_SIZE_8K	13
#define _PAGE_SIZE_16K	14
#define _PAGE_SIZE_64K	16
#define _PAGE_SIZE_256K	18
#define _PAGE_SIZE_1M	20
#define _PAGE_SIZE_4M	22
#define _PAGE_SIZE_16M	24
#define _PAGE_SIZE_64M	26
#define _PAGE_SIZE_256M	28

#define __ACCESS_BITS		_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
#define __DIRTY_BITS_NO_ED	_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
#define __DIRTY_BITS		_PAGE_ED | __DIRTY_BITS_NO_ED

/*
 * Definitions for first level:
 *
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#define PGDIR_SHIFT		(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD		(__IA64_UL(1) << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
#define FIRST_USER_PGD_NR	0

/*
 * Definitions for second level:
 *
 * PMD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(__IA64_UL(1) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PTRS_PER_PMD	(__IA64_UL(1) << (PAGE_SHIFT-3))

/*
 * Definitions for third level:
 */
#define PTRS_PER_PTE	(__IA64_UL(1) << (PAGE_SHIFT-3))
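
/*
 * A worked example (illustrative only): with the default 8KB page size
 * (PAGE_SHIFT == 13), each level holds 2^(13-3) = 1024 8-byte entries, so
 *
 *	PMD_SHIFT   = 13 + 10   = 23	=> a pgd entry maps 2^23 = 8MB
 *	PGDIR_SHIFT = 13 + 2*10 = 33	=> a pgd covers 2^33 bytes per slot
 *
 * i.e. one page of PTEs maps 8MB, one page of pmds maps 8GB, and a
 * fully-populated pgd covers 1024 * 8GB = 2^43 bytes = 8TB of virtual
 * space (which matches the "43 bit space" note near pgd_offset_k below).
 */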

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed. They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)

# ifndef __ASSEMBLY__

#include <asm/bitops.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>

/*
 * Next come the mappings that determine how mmap() protection bits
 * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
 * _P version gets used for private mappings, the _S version for
 * shared mappings created with MAP_SHARED.  For a private mapping,
 * we do a copy-on-write if a task attempts to write to the page.
 */
	/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_READONLY	/* write to priv pg -> copy & make writable */
#define __P011	PAGE_READONLY	/* ditto */
#define __P100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __P101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED	/* we don't have (and don't need) write-only */
#define __S011	PAGE_SHARED
#define __S100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __S101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __S110	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define __S111	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)

#define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e)	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))


/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
ia64_phys_addr_valid (unsigned long addr)
{
	return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for IA-64.
 */
#define kern_addr_valid(addr)	(1)


/*
 * Now come the defines and routines to manage and access the three-level
 * page table.
 */

/*
 * On some architectures, special things need to be done when setting
 * the PTE in a page table.  Nothing special needs to be done on IA-64.
 */
#define set_pte(ptep, pteval)	(*(ptep) = (pteval))

#define RGN_SIZE	(1UL << 61)
#define RGN_KERNEL	7

#define VMALLOC_START		(0xa000000000000000 + 3*PAGE_SIZE)
#define VMALLOC_VMADDR(x)	((unsigned long)(x))
#define VMALLOC_END_INIT        (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
#define VMALLOC_END             vmalloc_end
extern unsigned long vmalloc_end;

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,pgprot)							\
({										\
	pte_t __pte;								\
										\
	pte_val(__pte) = ((page - mem_map) << PAGE_SHIFT) | pgprot_val(pgprot);	\
	__pte;									\
})

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })

#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))

#define page_pte_prot(page,prot)	mk_pte(page, prot)
#define page_pte(page)			page_pte_prot(page, __pgprot(0))

#define pte_none(pte) 			(!pte_val(pte))
#define pte_present(pte)		(pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(pte)			(pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte)			(mem_map + (unsigned long) ((pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT))
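
/*
 * Illustrative sketch only: mk_pte() and pte_page() are inverses with
 * respect to the page frame, e.g.
 *
 *	pte_t pte = mk_pte(page, PAGE_READONLY);
 *	// pte_page(pte) == page, and pte_present(pte) is non-zero
 *
 * since mk_pte() stores (page - mem_map) << PAGE_SHIFT in the PPN bits
 * and pte_page() reverses that computation via _PFN_MASK (the protection
 * bits of the pgprots defined above all lie outside _PFN_MASK).
 */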

#define pmd_none(pmd)			(!pmd_val(pmd))
#define pmd_bad(pmd)			(!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd)		(pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pmd_page(pmd)			((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))

#define pgd_none(pgd)			(!pgd_val(pgd))
#define pgd_bad(pgd)			(!ia64_phys_addr_valid(pgd_val(pgd)))
#define pgd_present(pgd)		(pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)
#define pgd_page(pgd)			((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))

/*
 * The following have defined behavior only if pte_present() is true.
 */
#define pte_read(pte)		(((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
#define pte_write(pte)	((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte)		((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte)		((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte)		((pte_val(pte) & _PAGE_A) != 0)
/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
 */
#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkexec(pte)		(__pte(pte_val(pte) | _PAGE_AR_RX))
#define pte_mkold(pte)		(__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
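
/*
 * How pte_read()/pte_write() above relate to the AR values (a reader's
 * aid; the values come straight from the _PAGE_AR_* defines):
 *
 *	AR value		pte_read?	pte_write?
 *	0 (R)			yes		no
 *	1 (RX)			yes		no
 *	2 (RW)			yes		yes
 *	3 (RWX)			yes		yes
 *	4 (R_RW)		yes		yes
 *	5 (RX_RWX)		yes		yes
 *	6 (RWX_RW)		no		yes
 *	7 (X_RX)		no		no
 *
 * pte_write() uses the unsigned comparison (ar - 2) <= 4 to test for
 * ar in the range 2..6 with a single compare.
 */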

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)

/*
 * Macro to mark a page protection value as "write-combining".
 * Note that "protection" is really a misnomer here as the protection
 * value contains the memory attribute bits, dirty bits, and various
 * other bits as well.  Accesses through a write-combining translation
 * bypass the caches, but do allow consecutive writes to be combined
 * into single (but larger) write transactions.
 */
#define pgprot_writecombine(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)

/*
 * Return the region index for virtual address ADDRESS.
 */
static inline unsigned long
rgn_index (unsigned long address)
{
	ia64_va a;

	a.l = address;
	return a.f.reg;
}

/*
 * Return the region offset for virtual address ADDRESS.
 */
static inline unsigned long
rgn_offset (unsigned long address)
{
	ia64_va a;

	a.l = address;
	return a.f.off;
}

static inline unsigned long
pgd_index (unsigned long address)
{
	unsigned long region = address >> 61;
	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

	return (region << (PAGE_SHIFT - 6)) | l1index;
}
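
/*
 * A worked example (illustrative only, assuming 8KB pages): the pgd is
 * split into eight slices of PTRS_PER_PGD/8 = 128 entries, one per
 * region.  For a user address in region 2, say 0x4000000000000000:
 *
 *	region  = addr >> 61		= 2
 *	l1index = (addr >> 33) & 127	= 0
 *	index   = (2 << 7) | 0		= 256
 *
 * i.e. each region gets its own 128-entry slice of the 1024-entry pgd.
 */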

/* The offset in the 1-level directory is given by the 3 region bits
   (61..63) and the seven level-1 bits (33-39).  */
static inline pgd_t*
pgd_offset (struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* In the kernel's mapped region we have a full 43 bit space available and completely
   ignore the region number (since we know it's in region number 5). */
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
	((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/* Find an entry in the third-level page table.. */
#define pte_offset(dir,addr) \
	((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
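
/*
 * Putting the three levels together, a software page-table walk looks
 * like the following sketch (illustrative only; real callers must hold
 * the appropriate locks and should check each level before descending):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
 *		pmd_t *pmd = pmd_offset(pgd, addr);
 *		if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *			pte_t *pte = pte_offset(pmd, addr);
 *			if (pte_present(*pte))
 *				... use pte_page(*pte) ...
 *		}
 *	}
 */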

/* atomic versions of some of the PTE manipulations: */

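/*
 * Note (reader's aid): the SMP versions below can operate on the pte
 * word directly with the generic bitops because _PAGE_A_BIT and
 * _PAGE_D_BIT are defined as bit positions within the PTE.
 */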
static inline int
ptep_test_and_clear_young (pte_t *ptep)
{
#ifdef CONFIG_SMP
	return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte(ptep, pte_mkold(pte));
	return 1;
#endif
}

static inline int
ptep_test_and_clear_dirty (pte_t *ptep)
{
#ifdef CONFIG_SMP
	return test_and_clear_bit(_PAGE_D_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	set_pte(ptep, pte_mkclean(pte));
	return 1;
#endif
}

static inline pte_t
ptep_get_and_clear (pte_t *ptep)
{
#ifdef CONFIG_SMP
	return __pte(xchg((long *) ptep, 0));
#else
	pte_t pte = *ptep;
	pte_clear(ptep);
	return pte;
#endif
}

static inline void
ptep_set_wrprotect (pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;
	set_pte(ptep, pte_wrprotect(old_pte));
#endif
}

static inline void
ptep_mkdirty (pte_t *ptep)
{
#ifdef CONFIG_SMP
	set_bit(_PAGE_D_BIT, ptep);
#else
	pte_t old_pte = *ptep;
	set_pte(ptep, pte_mkdirty(old_pte));
#endif
}

static inline int
pte_same (pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

#define SWP_TYPE(entry)			(((entry).val >> 1) & 0xff)
#define SWP_OFFSET(entry)		(((entry).val << 1) >> 10)
#define SWP_ENTRY(type,offset)		((swp_entry_t) { ((type) << 1) | ((long) (offset) << 9) })
#define pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)		((pte_t) { (x).val })
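
/*
 * Swap-entry layout implied by the macros above (a reader's aid):
 * bit 0 (_PAGE_P) stays clear so the PTE is not-present, bits 1-8 hold
 * the swap type, and bits 9 and up hold the offset.  For example:
 *
 *	swp_entry_t e = SWP_ENTRY(3, 0x1000);
 *	// e.val == (3 << 1) | (0x1000L << 9)
 *	// SWP_TYPE(e) == 3, SWP_OFFSET(e) == 0x1000
 *
 * SWP_OFFSET() shifts left before shifting right ((val << 1) >> 10) so
 * that the topmost bit, which overlaps _PAGE_PROTNONE, is discarded.
 */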

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)

#define io_remap_page_range remap_page_range	/* XXX is this right? */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
#endif

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

/* arch mem_map init routines are needed due to holes in a virtual mem_map */
#define HAVE_ARCH_MEMMAP_INIT

typedef unsigned long memmap_init_callback_t(struct page *start,
	struct page *end, int zone, unsigned long start_paddr, int highmem);

extern unsigned long arch_memmap_init (memmap_init_callback_t *callback,
	struct page *start, struct page *end, int zone,
	unsigned long start_paddr, int highmem);

# endif /* !__ASSEMBLY__ */

/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE	(1 << IA64_GRANULE_SHIFT)
/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT	_PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)
#define KERNEL_TR_PAGE_NUM	((KERNEL_START - PAGE_OFFSET) / KERNEL_TR_PAGE_SIZE)

#endif /* _ASM_IA64_PGTABLE_H */