/*
 * linux/include/asm-arm/proc-armv/pgtable.h
 *
 * Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 12-Jan-1997	RMK	Altered flushing routines to use function pointers
 *			now possible to combine ARM6, ARM7 and StrongARM versions.
 * 17-Apr-1999	RMK	Now pass an area size to clean_cache_area and
 *			flush_icache_area.
 */
#ifndef __ASM_PROC_PGTABLE_H
#define __ASM_PROC_PGTABLE_H

#include <asm/proc/domain.h>
#include <asm/arch/vmalloc.h>

/*
 * entries per page directory level: they are two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE		256
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		4096
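
/*
 * Illustrative arithmetic (not part of the original header, assuming
 * 4 KB small pages): each PTE table maps PTRS_PER_PTE * 4 KB = 1 MB,
 * so the PTRS_PER_PGD = 4096 first-level entries cover
 * 4096 * 1 MB = 4 GB of virtual address space.
 */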

/****************
* PMD functions *
****************/

/* PMD types (actually level 1 descriptor) */
#define PMD_TYPE_MASK		0x0003
#define PMD_TYPE_FAULT		0x0000
#define PMD_TYPE_TABLE		0x0001
#define PMD_TYPE_SECT		0x0002
#define PMD_UPDATABLE		0x0010
#define PMD_SECT_CACHEABLE	0x0008
#define PMD_SECT_BUFFERABLE	0x0004
#define PMD_SECT_AP_WRITE	0x0400
#define PMD_SECT_AP_READ	0x0800
#define PMD_DOMAIN(x)		((x) << 5)

#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_KERNEL))

#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
#define set_pmd(pmdp,pmd)	cpu_set_pmd(pmdp,pmd)

static inline pmd_t __mk_pmd(pte_t *ptep, unsigned long prot)
{
	unsigned long pte_ptr = (unsigned long)ptep;
	pmd_t pmd;

	pte_ptr -= PTRS_PER_PTE * sizeof(void *);

	/*
	 * The pmd must be loaded with the physical
	 * address of the PTE table
	 */
	pmd_val(pmd) = __virt_to_phys(pte_ptr) | prot;

	return pmd;
}

static inline unsigned long pmd_page(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);

	ptr += PTRS_PER_PTE * sizeof(void *);

	return __phys_to_virt(ptr);
}
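
/*
 * Illustrative sketch, not part of the original header: __mk_pmd()
 * steps back PTRS_PER_PTE * sizeof(void *) = 1024 bytes from the Linux
 * PTE table to the hardware PTE table and records its physical address,
 * while pmd_page() masks off the low protection bits and steps forward
 * again.  Assuming the Linux PTE table is 1 KB aligned and prot fits in
 * those low bits, the two are inverses.  The helper name below is
 * hypothetical, for demonstration only.
 */
static inline int __pmd_roundtrip_ok(pte_t *linux_ptep, unsigned long prot)
{
	pmd_t pmd = __mk_pmd(linux_ptep, prot);

	/* pmd_page() should recover the Linux PTE table address */
	return pmd_page(pmd) == (unsigned long)linux_ptep;
}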

/****************
* PTE functions *
****************/

/* PTE types (actually level 2 descriptor) */
#define PTE_TYPE_MASK	0x0003
#define PTE_TYPE_FAULT	0x0000
#define PTE_TYPE_LARGE	0x0001
#define PTE_TYPE_SMALL	0x0002
#define PTE_AP_READ	0x0aa0
#define PTE_AP_WRITE	0x0550
#define PTE_CACHEABLE	0x0008
#define PTE_BUFFERABLE	0x0004

#define set_pte(ptep, pte)	cpu_set_pte(ptep,pte)

/* We now keep two sets of ptes - the physical and the linux version.
 * This gives us many advantages, and allows us greater flexibility.
 *
 * The Linux pte's contain:
 *  bit   meaning
 *   0    page present
 *   1    young
 *   2    bufferable - matches physical pte
 *   3    cacheable - matches physical pte
 *   4    user
 *   5    write
 *   6    execute
 *   7    dirty
 *  8-11  unused
 * 12-31  virtual page address
 *
 * These are stored at the pte pointer; the physical PTE is at -1024 bytes.
 */
#define L_PTE_PRESENT		(1 << 0)
#define L_PTE_YOUNG		(1 << 1)
#define L_PTE_BUFFERABLE	(1 << 2)
#define L_PTE_CACHEABLE		(1 << 3)
#define L_PTE_USER		(1 << 4)
#define L_PTE_WRITE		(1 << 5)
#define L_PTE_EXEC		(1 << 6)
#define L_PTE_DIRTY		(1 << 7)
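
/*
 * Illustrative sketch, not part of the original header: given the
 * layout described above, a hardware PTE sits PTRS_PER_PTE *
 * sizeof(void *) = 1024 bytes below its Linux counterpart.  The helper
 * name below is hypothetical, for demonstration only; the real update
 * of both entries is performed by cpu_set_pte() via set_pte() above.
 */
static inline pte_t *__hw_pte_from_linux_pte(pte_t *linux_ptep)
{
	return (pte_t *)((unsigned long)linux_ptep -
			 PTRS_PER_PTE * sizeof(void *));
}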

/*
 * The following macros handle the cache and bufferable bits...
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG
#define _L_PTE_READ	L_PTE_USER | L_PTE_CACHEABLE | L_PTE_BUFFERABLE

#define PAGE_NONE	__pgprot(_L_PTE_DEFAULT)
#define PAGE_COPY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_SHARED	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
#define PAGE_READONLY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_KERNEL	__pgprot(_L_PTE_DEFAULT | L_PTE_CACHEABLE | L_PTE_BUFFERABLE | L_PTE_DIRTY | L_PTE_WRITE)

#define _PAGE_CHG_MASK	(PAGE_MASK | L_PTE_DIRTY | L_PTE_YOUNG)


/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_read(pte)		(pte_val(pte) & L_PTE_USER)
#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
#define pte_exec(pte)		(pte_val(pte) & L_PTE_EXEC)
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)

#define PTE_BIT_FUNC(fn,op)			\
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

/*PTE_BIT_FUNC(rdprotect, &= ~L_PTE_USER);*/
/*PTE_BIT_FUNC(mkread, |= L_PTE_USER);*/
PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite, |= L_PTE_WRITE);
PTE_BIT_FUNC(exprotect, &= ~L_PTE_EXEC);
PTE_BIT_FUNC(mkexec, |= L_PTE_EXEC);
PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
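
/*
 * For example, PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY) above expands to:
 *
 *	static inline pte_t pte_mkdirty(pte_t pte)
 *	{ pte_val(pte) |= L_PTE_DIRTY; return pte; }
 */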

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot)	__pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))

#endif /* __ASM_PROC_PGTABLE_H */