#ifndef _I386_PGTABLE_3LEVEL_H
#define _I386_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT	30
#define PTRS_PER_PGD	4

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT	21
#define PTRS_PER_PMD	512

/*
 * entries per lowest-level page table
 */
#define PTRS_PER_PTE	512

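/*
 * For illustration (not in the original header): with the constants
 * above, a 32-bit linear address decomposes under PAE as
 *
 *	31   30 29        21 20        12 11          0
 *	+------+------------+------------+-------------+
 *	| pgd  |    pmd     |    pte     | page offset |
 *	|2 bits|   9 bits   |   9 bits   |   12 bits   |
 *	+------+------------+------------+-------------+
 *
 * so each pgd entry maps 1 GiB, each pmd entry 2 MiB, and each pte
 * one 4 KiB page.  A minimal sketch of the index extraction follows
 * (hypothetical helpers, compiled out; the kernel proper uses
 * pgd_index()/__pmd_offset()/__pte_offset() from pgtable.h):
 */
#if 0
static inline unsigned int sketch_pgd_index(unsigned long addr)
{
	return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);	/* bits 31:30 */
}
static inline unsigned int sketch_pmd_index(unsigned long addr)
{
	return (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);	/* bits 29:21 */
}
#endif
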
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	/* Write the high word first: the hardware ignores the entry
	 * until the low word, which holds the present bit, is set. */
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
#define set_pmd(pmdptr,pmdval) \
		set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
#define set_pgd(pgdptr,pgdval) \
		set_64bit((unsigned long long *)(pgdptr),pgd_val(pgdval))
#define set_pte_atomic(pteptr,pteval) \
		set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
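
/*
 * Editorial note: set_64bit() (from <asm/system.h> on i386, built on
 * cmpxchg8b) stores all 64 bits in one atomic operation, so a
 * concurrent hardware page walk never observes a half-written pmd or
 * pgd entry.  A hypothetical use (sketch only; pmdp and pte_table
 * are placeholder names, compiled out):
 */
#if 0
	/* publish a pmd entry pointing at a pte page, atomically */
	set_pmd(pmdp, __pmd(_PAGE_TABLE + __pa(pte_table)));
#endif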

/*
 * Pentium-II erratum A13: in PAE mode we explicitly have to flush
 * the TLB via cr3 if the top-level pgd is changed...
 * We do not let the generic code free and clear pgd entries due to
 * this erratum.
 */
static inline void pgd_clear (pgd_t * pgd) { }

#define pgd_page(pgd) \
((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, address) ((pmd_t *) pgd_page(*(dir)) + \
			__pmd_offset(address))
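
/*
 * A hedged usage sketch (hypothetical snippet, compiled out): walking
 * all three levels for an address.  pgd_offset() and pte_offset()
 * come from pgtable.h and their names vary across kernel versions;
 * mm and address are placeholder variables.
 */
#if 0
	pgd_t *pgd = pgd_offset(mm, address);
	pmd_t *pmd = pmd_offset(pgd, address);
	pte_t *pte = pte_offset(pmd, address);
#endif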

static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits:
	 * clearing pte_low first makes the pte not-present, so the
	 * hardware will not touch the entry while we read and clear
	 * the high word. */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
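
/*
 * Sketch of the update rule documented above set_pte() (hypothetical
 * helper, compiled out): when the old pte may still be live, make it
 * not-present atomically first, then install the new value.
 */
#if 0
static inline pte_t sketch_replace_pte(pte_t *ptep, pte_t newval)
{
	pte_t old = ptep_get_and_clear(ptep);	/* hw stops updating it */
	set_pte(ptep, newval);			/* safe to write both halves */
	return old;
}
#endif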

static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

#define pte_page(x)	(mem_map+(((x).pte_low >> PAGE_SHIFT) | ((x).pte_high << (32 - PAGE_SHIFT))))
#define pte_none(x)	(!(x).pte_low && !(x).pte_high)

static inline pte_t __mk_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte_high = page_nr >> (32 - PAGE_SHIFT);
	pte.pte_low = (page_nr << PAGE_SHIFT) | pgprot_val(pgprot);
	return pte;
}
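
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): for
 * page_nr 0x123456, a frame just above the 4 GiB boundary at
 * physical address 0x123456000:
 *
 *	pte.pte_high = 0x123456 >> 20          = 0x00000001
 *	pte.pte_low  = (0x123456 << 12) | prot = 0x23456000 | prot
 *
 * pte_page() above performs the inverse, reassembling the frame
 * number from the two 32-bit halves.
 */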

#endif /* _I386_PGTABLE_3LEVEL_H */