1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 - 2001 by Ralf Baechle at alii
7 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
8 */
9 #ifndef _ASM_PGTABLE_H
10 #define _ASM_PGTABLE_H
11
12 #include <linux/config.h>
13 #include <asm/addrspace.h>
14 #include <asm/page.h>
15
16 #ifndef __ASSEMBLY__
17
18 #include <linux/linkage.h>
19 #include <asm/cacheflush.h>
20 #include <linux/mmzone.h>
21 #include <asm/cachectl.h>
22 #include <asm/io.h>
23
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/* Test / set / clear the arch-private "dcache dirty" bit in page->flags. */
#define Page_dcache_dirty(page) \
	test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page) \
	set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page) \
	clear_bit(PG_dcache_dirty, &(page)->flags)
36
37
38 /*
39 * Each address space has 2 4K pages as its page directory, giving 1024
40 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
41 * pair of 4K pages, giving 1024 (== PTRS_PER_PMD) 8 byte pointers to
42 * page tables. Each page table is a single 4K page, giving 512 (==
43 * PTRS_PER_PTE) 8 byte ptes. Each pgde is initialized to point to
44 * invalid_pmd_table, each pmde is initialized to point to
45 * invalid_pte_table, each pte is initialized to 0. When memory is low,
46 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
47 * and empty_bad_page_table is returned back to higher layer code, so
48 * that the failure is recognized later on. Linux does not seem to
49 * handle these failures very well though. The empty_bad_page_table has
50 * invalid pte entries in it, to force page faults.
51 * Vmalloc handling: vmalloc uses swapper_pg_dir[0] (returned by
 * pgd_offset_k), which is initialized to point to kpmdtbl. kpmdtbl is
53 * the only single page pmd in the system. kpmdtbl entries point into
54 * kptbl[] array. We reserve 1 << PGD_ORDER pages to hold the
55 * vmalloc range translations, which the fault handler looks at.
56 */
57
58 #endif /* !__ASSEMBLY__ */
59
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + 1 - 3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* log2 of the size of a table entry at each level (8-byte entries => 3). */
#define PGD_T_LOG2	ffz(~sizeof(pgd_t))
#define PMD_T_LOG2	ffz(~sizeof(pmd_t))
#define PTE_T_LOG2	ffz(~sizeof(pte_t))
73
/*
 * For 4kB page size we use a 3 level page tree and an 8kB pmd and pgds which
 * permits us mapping 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space.  We could add a third level but it seems
 * like at the moment there's no need for this.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER		1
#define PMD_ORDER		1
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PGD_ORDER		0
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER		0
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif

/* Number of entries held by a table at each level. */
#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

/*
 * vmalloc lives in XKSEG; its size is bounded by the 1 << PGD_ORDER kptbl
 * pages reserved for it (see the layout comment near the top of this file).
 */
#define VMALLOC_START		XKSEG
#define VMALLOC_VMADDR(x)	((unsigned long)(x))
#define VMALLOC_END	\
	(VMALLOC_START + ((1 << PGD_ORDER) * PTRS_PER_PTE * PAGE_SIZE))
115
116 #include <asm/pgtable-bits.h>
117
/* Protection value encodings combined into the __P/__S tables below. */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
			_PAGE_GLOBAL | PAGE_CACHABLE_DEFAULT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
131
/*
 * MIPS can't do page protection for execute, and treats that the same as
 * read.  Also, write permissions imply read permissions.  This is the
 * closest we can get by reasonable means..
 * (__Pxxx = private/copy-on-write mappings, __Sxxx = shared mappings;
 * the three bits are read/write/execute as requested by mmap.)
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
154
155 #ifndef __ASSEMBLY__
156
/* Report a corrupt table entry at each level, with source location. */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
163
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))

/*
 * Statically allocated fallback tables: empty pmd/pgd entries point at
 * invalid_pte_table / invalid_pmd_table, and the empty_bad_* tables are
 * handed out when a table allocation fails (see the layout comment above).
 */
extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
extern pte_t empty_bad_page_table[PAGE_SIZE/sizeof(pte_t)];
extern pmd_t invalid_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];
extern pmd_t empty_bad_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];
179
180 /*
181 * Conversion functions: convert a page and protection to a page entry,
182 * and a page entry and page directory to the page they refer to.
183 */
/* Address of the pte table this pmd entry points at. */
static inline unsigned long pmd_page(pmd_t pmd)
{
	unsigned long page = pmd_val(pmd);

	return page;
}
188
/* Address of the pmd table this pgd entry points at. */
static inline unsigned long pgd_page(pgd_t pgd)
{
	unsigned long page = pgd_val(pgd);

	return page;
}
193
/* Point a pmd entry at a pte table; the address is forced page aligned. */
static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{
	unsigned long table = (unsigned long) ptep;

	pmd_val(*pmdp) = table & PAGE_MASK;
}
198
/* Point a pgd entry at a pmd table; the address is forced page aligned. */
static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
	unsigned long table = (unsigned long) pmdp;

	pgd_val(*pgdp) = table & PAGE_MASK;
}
203
/* A pte counts as empty when no bits besides the global bit are set. */
static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & ~_PAGE_GLOBAL) == 0;
}
208
/* Nonzero if the pte maps a page currently present in memory. */
static inline int pte_present(pte_t pte)
{
	unsigned long val = pte_val(pte);

	return val & _PAGE_PRESENT;
}
213
/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/*
	 * ptes are handled in buddy pairs (cf. ptep_buddy() and the
	 * "preserve global status for the pair" logic in pte_clear());
	 * when marking one half global, the empty other half must be
	 * made global too so the pair stays consistent.
	 */
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}
234
/*
 * Clear a pte.  If the buddy pte of the pair carries the global bit,
 * keep the global bit set in the cleared entry so the pair stays
 * consistent (see set_pte() above for the matching propagation).
 */
static inline void pte_clear(pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte(ptep, __pte(_PAGE_GLOBAL));
	else
#endif
	set_pte(ptep, __pte(0));
}
245
/*
 * Plain stores into pmd / pgd slots.
 * (pmds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
252
253 /*
254 * Empty pmd entries point to the invalid_pte_table.
255 */
pmd_none(pmd_t pmd)256 static inline int pmd_none(pmd_t pmd)
257 {
258 return pmd_val(pmd) == (unsigned long) invalid_pte_table;
259 }
260
/* A pmd entry is bad if any bits below the page frame are set. */
static inline int pmd_bad(pmd_t pmd)
{
	unsigned long low_bits = pmd_val(pmd) & ~PAGE_MASK;

	return low_bits;
}
265
/* Present means the pmd points at a real pte table, not the invalid one. */
static inline int pmd_present(pmd_t pmd)
{
	unsigned long empty = (unsigned long) invalid_pte_table;

	return pmd_val(pmd) != empty;
}
270
/* Reset a pmd entry to point back at the shared invalid pte table. */
static inline void pmd_clear(pmd_t *pmdp)
{
	unsigned long empty = (unsigned long) invalid_pte_table;

	pmd_val(*pmdp) = empty;
}
275
276 /*
277 * Empty pgd entries point to the invalid_pmd_table.
278 */
pgd_none(pgd_t pgd)279 static inline int pgd_none(pgd_t pgd)
280 {
281 return pgd_val(pgd) == (unsigned long) invalid_pmd_table;
282 }
283
/* A pgd entry is bad if any bits below the page frame are set. */
static inline int pgd_bad(pgd_t pgd)
{
	unsigned long low_bits = pgd_val(pgd) & ~PAGE_MASK;

	return low_bits;
}
288
/* Present means the pgd points at a real pmd table, not the invalid one. */
static inline int pgd_present(pgd_t pgd)
{
	unsigned long empty = (unsigned long) invalid_pmd_table;

	return pgd_val(pgd) != empty;
}
293
/* Reset a pgd entry to point back at the shared invalid pmd table. */
static inline void pgd_clear(pgd_t *pgdp)
{
	unsigned long empty = (unsigned long) invalid_pmd_table;

	pgd_val(*pgdp) = empty;
}
298
/*
 * struct page for the frame a pte maps.  With DISCONTIGMEM the page
 * number must be computed relative to the node that owns the address.
 */
#ifndef CONFIG_DISCONTIGMEM
#define pte_page(x)		(mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#else
#define mips64_pte_pagenr(x) \
	(PLAT_NODE_DATA_STARTNR(PHYSADDR_TO_NID(pte_val(x))) + \
	PLAT_NODE_DATA_LOCALNR(pte_val(x), PHYSADDR_TO_NID(pte_val(x))))
#define pte_page(x)		(mem_map+mips64_pte_pagenr(x))
#endif
307
308 /*
309 * The following only work if pte_present() is true.
310 * Undefined behaviour if not..
311 */
pte_read(pte_t pte)312 static inline int pte_read(pte_t pte)
313 {
314 return pte_val(pte) & _PAGE_READ;
315 }
316
/* Nonzero if the pte grants write permission. */
static inline int pte_write(pte_t pte)
{
	unsigned long val = pte_val(pte);

	return val & _PAGE_WRITE;
}
321
/* Nonzero if the page has been written through this pte. */
static inline int pte_dirty(pte_t pte)
{
	unsigned long val = pte_val(pte);

	return val & _PAGE_MODIFIED;
}
326
/* Nonzero if the page has been accessed through this pte. */
static inline int pte_young(pte_t pte)
{
	unsigned long val = pte_val(pte);

	return val & _PAGE_ACCESSED;
}
331
/* Revoke write permission, including the silent-write fast path bit. */
static inline pte_t pte_wrprotect(pte_t pte)
{
	unsigned long clear = _PAGE_WRITE | _PAGE_SILENT_WRITE;

	pte_val(pte) &= ~clear;
	return pte;
}
337
/* Revoke read permission, including the silent-read fast path bit. */
static inline pte_t pte_rdprotect(pte_t pte)
{
	unsigned long clear = _PAGE_READ | _PAGE_SILENT_READ;

	pte_val(pte) &= ~clear;
	return pte;
}
343
/* Mark the pte clean; silent write must go as well. */
static inline pte_t pte_mkclean(pte_t pte)
{
	unsigned long clear = _PAGE_MODIFIED | _PAGE_SILENT_WRITE;

	pte_val(pte) &= ~clear;
	return pte;
}
349
/* Mark the pte not-accessed; silent read must go as well. */
static inline pte_t pte_mkold(pte_t pte)
{
	unsigned long clear = _PAGE_ACCESSED | _PAGE_SILENT_READ;

	pte_val(pte) &= ~clear;
	return pte;
}
355
/* Grant write permission; if already dirty, enable silent write too. */
static inline pte_t pte_mkwrite(pte_t pte)
{
	unsigned long val = pte_val(pte) | _PAGE_WRITE;

	if (val & _PAGE_MODIFIED)
		val |= _PAGE_SILENT_WRITE;
	pte_val(pte) = val;
	return pte;
}
363
/* Grant read permission; if already accessed, enable silent read too. */
static inline pte_t pte_mkread(pte_t pte)
{
	unsigned long val = pte_val(pte) | _PAGE_READ;

	if (val & _PAGE_ACCESSED)
		val |= _PAGE_SILENT_READ;
	pte_val(pte) = val;
	return pte;
}
371
/* Mark the pte dirty; if writable, enable silent write too. */
static inline pte_t pte_mkdirty(pte_t pte)
{
	unsigned long val = pte_val(pte) | _PAGE_MODIFIED;

	if (val & _PAGE_WRITE)
		val |= _PAGE_SILENT_WRITE;
	pte_val(pte) = val;
	return pte;
}
379
/* Mark the pte accessed; if readable, enable silent read too. */
static inline pte_t pte_mkyoung(pte_t pte)
{
	unsigned long val = pte_val(pte) | _PAGE_ACCESSED;

	if (val & _PAGE_READ)
		val |= _PAGE_SILENT_READ;
	pte_val(pte) = val;
	return pte;
}
387
388 /*
389 * Macro to make mark a page protection value as "uncacheable". Note
390 * that "protection" is really a misnomer here as the protection value
391 * contains the memory attribute bits, dirty bits, and various other
392 * bits as well.
393 */
394 #define pgprot_noncached pgprot_noncached
395
pgprot_noncached(pgprot_t _prot)396 static inline pgprot_t pgprot_noncached(pgprot_t _prot)
397 {
398 unsigned long prot = pgprot_val(_prot);
399
400 prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
401
402 return __pgprot(prot);
403 }
404
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
/* Physical address of a struct page (node-relative with DISCONTIGMEM). */
#ifndef CONFIG_DISCONTIGMEM
#define PAGE_TO_PA(page)	((page - mem_map) << PAGE_SHIFT)
#else
#define PAGE_TO_PA(page) \
		((((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT) \
		+ (page_zone(page)->zone_start_paddr))
#endif
/* Build a pte mapping @page with protection @pgprot. */
#define mk_pte(page, pgprot) \
({ \
	pte_t __pte; \
 \
	pte_val(__pte) = ((unsigned long)(PAGE_TO_PA(page))) | \
	                 pgprot_val(pgprot); \
 \
	__pte; \
})
425
mk_pte_phys(unsigned long physpage,pgprot_t pgprot)426 static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
427 {
428 return __pte(physpage | pgprot_val(pgprot));
429 }
430
/* Replace the protection bits of a pte, keeping the frame and state bits. */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long kept = pte_val(pte) & _PAGE_CHG_MASK;

	return __pte(kept | pgprot_val(newprot));
}
435
#define page_pte(page) page_pte_prot(page, __pgprot(0))

/*
 * To find an entry in a kernel page-table-directory.  All vmalloc
 * translations live under swapper_pg_dir[0] (see the layout comment at
 * the top of this file), hence the constant index 0 here rather than
 * indexing by the address.
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, 0)
440
/*
 * Index into the pgd for @address.  The argument must be parenthesized:
 * `>>` binds tighter than e.g. `&` or `|`, so an unparenthesized
 * expansion of pgd_index(x | y) would compute the wrong index.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
442
443 /* to find an entry in a page-table-directory */
pgd_offset(struct mm_struct * mm,unsigned long address)444 static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
445 {
446 return mm->pgd + pgd_index(address);
447 }
448
449 /* Find an entry in the second-level page table.. */
pmd_offset(pgd_t * dir,unsigned long address)450 static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
451 {
452 return (pmd_t *) pgd_page(*dir) +
453 ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
454 }
455
456 /* Find an entry in the third-level page table.. */
pte_offset(pmd_t * dir,unsigned long address)457 static inline pte_t *pte_offset(pmd_t * dir, unsigned long address)
458 {
459 return (pte_t *) (pmd_page(*dir)) +
460 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
461 }
462
/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/* The kernel's master page directory. */
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

/* Arch hooks invoked after a pte change; see update_mmu_cache() below. */
extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
476
/*
 * Generic MM hook, called after a pte for @address in @vma has changed:
 * lets the architecture refresh its TLB and cache state for the page.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t pte)
{
	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}
483
484 /*
485 * Non-present pages: high 24 bits are offset, next 8 bits type,
486 * low 32 bits zero.
487 */
mk_swap_pte(unsigned long type,unsigned long offset)488 static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
489 { pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }
490
/* Unpack swap entries: type in bits 32..39, offset in bits 40..63. */
#define SWP_TYPE(x)		(((x).val >> 32) & 0xff)
#define SWP_OFFSET(x)		((x).val >> 40)
#define SWP_ENTRY(type,offset)	((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })
#define pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)	((pte_t) { (x).val })

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr)	(1)
#endif
502
/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

/*
 * We provide our own get_unmapped area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/* io mappings need no special treatment here; plain remap suffices. */
#define io_remap_page_range remap_page_range
517
518 #endif /* !__ASSEMBLY__ */
519
520 #endif /* _ASM_PGTABLE_H */
521