/*
 *  linux/include/asm-arm/pgtable.h
 *
 *  Copyright (C) 2000-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/config.h>
#include <asm/memory.h>
#include <asm/proc-fns.h>

/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT		20
#define PGDIR_SHIFT		20

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
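
/*
 * With PMD_SHIFT == PGDIR_SHIFT == 20, each first-level entry covers
 * 1UL << 20 = 1MB of virtual space, so PMD_MASK and PGDIR_MASK both
 * evaluate to 0xfff00000.
 */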

#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
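
/*
 * For illustration, if TASK_SIZE were 3GB (0xc0000000), user space
 * would span 3072 of these 1MB slots and USER_PTRS_PER_PGD would be
 * 3072 - 1 = 3071, since FIRST_USER_PGD_NR excludes the first slot
 * (the lowest 1MB of virtual space) from the user's count.
 */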

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
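
/*
 * The generic VM gathers these sixteen values into protection_map[]
 * (see mm/mmap.c) and indexes it with the low four VM_* flag bits,
 * roughly:
 *
 *	vma->vm_page_prot = protection_map[vm_flags & 0x0f];
 *
 * So a private PROT_READ|PROT_WRITE mapping ends up with __P011, i.e.
 * PAGE_COPY, and is resolved by copy-on-write, while the MAP_SHARED
 * equivalent gets __S011, i.e. PAGE_SHARED.
 */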

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(ptep)		set_pte((ptep), __pte(0))

#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_clear(pmdp)		set_pmd(pmdp, __pmd(0))

/*
 * Permanent address of a page. We never have highmem, so this is trivial.
 */
#define pages_to_mb(x)		((x) >> (20 - PAGE_SHIFT))
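
/*
 * With 4K pages (PAGE_SHIFT == 12) pages_to_mb() is simply x >> 8,
 * i.e. 256 pages per megabyte.
 */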

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
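
/*
 * A minimal sketch of how these are normally used when installing a
 * mapping (the real callers live in the generic mm code and take the
 * appropriate page table locks):
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	set_pte(ptep, entry);
 */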

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)

#define page_pte_prot(page,prot)	mk_pte(page, prot)
#define page_pte(page)		mk_pte(page, __pgprot(0))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define __pgd_offset(addr)	pgd_index(addr)

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
#define __pte_offset(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, addr)	((pte_t *)pmd_page(*(dir)) + __pte_offset(addr))
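
/*
 * Putting the lookup macros together, walking down to the pte for an
 * address looks roughly like this (locking and validity checks omitted):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset(pmd, addr);
 *
 * Because the pmd is folded into the pgd, pmd_offset() is just a cast
 * and the walk is effectively two levels deep.
 */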

#include <asm/proc/pgtable.h>

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
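
/*
 * pte_modify() keeps whatever _PAGE_CHG_MASK selects (the parts of the
 * pte that must survive a protection change, such as the page frame
 * number) and replaces the rest, e.g. for an mprotect()-style update:
 *
 *	set_pte(ptep, pte_modify(*ptep, newprot));
 */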

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on 4k machines
 */
#define SWP_TYPE(x)		(((x).val >> 2) & 0x7f)
#define SWP_OFFSET(x)		((x).val >> 9)
#define SWP_ENTRY(type,offset)	((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(swp)	((pte_t) { (swp).val })
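
/*
 * Rough layout of a swap entry in the 32-bit pte word:
 *
 *	bits 31..9	swap offset (23 bits -> 8M pages = 32GB with 4K pages)
 *	bits  8..2	swap type   (up to 128 swap areas)
 *	bits  1..0	left clear, so the entry never looks like a
 *			valid, present pte
 *
 * pte_to_swp_entry()/swp_entry_to_pte() simply pass the word through
 * unchanged.
 */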

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

extern void pgtable_cache_init(void);

/*
 * remap a physical address `phys' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_page_range(from,phys,size,prot) \
		remap_page_range(from,phys,size,prot)
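
/*
 * A driver's mmap() method would typically use this along the lines of
 * (sketch only; `phys' here stands for the page-aligned physical base
 * the driver wants to expose):
 *
 *	io_remap_page_range(vma->vm_start, phys,
 *			    vma->vm_end - vma->vm_start,
 *			    vma->vm_page_prot);
 */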

#endif /* !__ASSEMBLY__ */

#endif /* _ASMARM_PGTABLE_H */