#ifndef _X86_64_PGALLOC_H
#define _X86_64_PGALLOC_H

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/pda.h>
#include <linux/threads.h>
#include <linux/mm.h>
#include <asm/page.h>

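/*
 * pgtable_cache_sz lives in the per-CPU PDA and counts the pages
 * currently parked on this CPU's page-table quicklists.
 */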
#define inc_pgcache_size() add_pda(pgtable_cache_sz,1UL)
#define dec_pgcache_size() sub_pda(pgtable_cache_sz,1UL)

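/*
 * Hook a freshly allocated lower-level table into the level above:
 * the entry gets the table's physical address plus the standard
 * kernel page-table permission bits (_PAGE_TABLE).
 */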
#define pmd_populate(mm, pmd, pte) \
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
#define pgd_populate(mm, pgd, pmd) \
		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pmd)))

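/*
 * Page-table quicklists: freed pgd/pmd/pte pages are cached on
 * per-CPU lists hanging off the PDA instead of going back to the page
 * allocator.  The link to the next free page is stored in the first
 * word of each page:
 *
 *	pda->pmd_quick --> [next|0...0] --> [next|0...0] --> NULL
 *
 * so a fast allocation pops the head and re-zeroes the link word,
 * and a free pushes by writing the old head into word 0.
 */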
extern __inline__ pmd_t *get_pmd_slow(void)
{
	return (pmd_t *)get_zeroed_page(GFP_KERNEL);
}

extern __inline__ pmd_t *get_pmd_fast(void)
{
	unsigned long *ret;

	if ((ret = read_pda(pmd_quick)) != NULL) {
		/* Pop the quicklist head and re-zero the link word. */
		write_pda(pmd_quick, (unsigned long *)(*ret));
		ret[0] = 0;
		dec_pgcache_size();
	} else
		ret = (unsigned long *)get_pmd_slow();
	return (pmd_t *)ret;
}

extern __inline__ void pmd_free(pmd_t *pmd)
{
	/* Push onto the quicklist; the link lives in the first word. */
	*(unsigned long *)pmd = (unsigned long) read_pda(pmd_quick);
	write_pda(pmd_quick,(unsigned long *) pmd);
	inc_pgcache_size();
}

extern __inline__ void pmd_free_slow(pmd_t *pmd)
{
	/* pmd pages are page aligned; anything else is corruption. */
	if ((unsigned long)pmd & (PAGE_SIZE-1))
		out_of_line_bug();
	free_page((unsigned long)pmd);
}

static inline pmd_t *pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
{
	unsigned long *ret = (unsigned long *)read_pda(pmd_quick);

	if (ret != NULL) {
		write_pda(pmd_quick, (unsigned long *)(*ret));
		ret[0] = 0;
		dec_pgcache_size();
	}
	return (pmd_t *)ret;
}

static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)get_zeroed_page(GFP_KERNEL);
}

static inline pgd_t *pgd_alloc_one_fast (void)
{
	unsigned long *ret = read_pda(pgd_quick);

	if (ret) {
		write_pda(pgd_quick,(unsigned long *)(*ret));
		ret[0] = 0;
		dec_pgcache_size();
	}
	return (pgd_t *) ret;
}

static inline pgd_t *pgd_alloc (struct mm_struct *mm)
{
	/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
	pgd_t *pgd = pgd_alloc_one_fast();

	if (pgd == NULL)
		pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
	return pgd;
}

static inline void pgd_free (pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) read_pda(pgd_quick);
	write_pda(pgd_quick,(unsigned long *) pgd);
	inc_pgcache_size();
}


static inline void pgd_free_slow (pgd_t *pgd)
{
	if ((unsigned long)pgd & (PAGE_SIZE-1))
		out_of_line_bug();
	free_page((unsigned long)pgd);
}


static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)get_zeroed_page(GFP_KERNEL);
}

extern __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;

	if ((ret = read_pda(pte_quick)) != NULL) {
		write_pda(pte_quick, (unsigned long *)(*ret));
		/* ret[1] is still zero (the page was zeroed when first
		   allocated), so this clears the link word. */
		ret[0] = ret[1];
		dec_pgcache_size();
	}
	return (pte_t *)ret;
}
/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */
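/*
 * A minimal sketch of that idea, assuming a hypothetical pte_count
 * field in struct page (nothing below exists in this tree):
 *
 *	if (atomic_dec_and_test(&virt_to_page(pte)->pte_count))
 *		pte_free_slow(pte);	// last user: really free it
 *
 * i.e. only give the page back once the count of live users drops to
 * zero, instead of caching it unconditionally as pte_free() does.
 */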

extern __inline__ void pte_free(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) read_pda(pte_quick);
	write_pda(pte_quick, (unsigned long *) pte);
	inc_pgcache_size();
}

extern __inline__ void pte_free_slow(pte_t *pte)
{
	if ((unsigned long)pte & (PAGE_SIZE-1))
		out_of_line_bug();
	free_page((unsigned long)pte);
}


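/*
 * Trims the page-table quicklists: once the cached-page count passes
 * the second (high) watermark, pages are freed back until it drops
 * under the first (low) one.  Returns the number of pages freed.
 */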
extern int do_check_pgt_cache(int, int);

/*
 * TLB flushing:
 *
 * - flush_tlb() flushes the current mm struct TLBs
 * - flush_tlb_all() flushes all processes' TLBs
 * - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(mm, start, end) flushes a range of pages
 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */

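/*
 * On UP the flushes below are inline and can be skipped for mms that
 * are not currently loaded; the SMP versions live out of line since
 * they also have to notify other CPUs.
 */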
#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

#else

#include <asm/smp.h>

#define local_flush_tlb() \
	__flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb() flush_tlb_current_task()

static inline void flush_tlb_range(struct mm_struct * mm, unsigned long start, unsigned long end)
{
	flush_tlb_mm(mm);
}
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct tlb_state
{
	struct mm_struct *active_mm;
	int state;
} ____cacheline_aligned;
extern struct tlb_state cpu_tlbstate[NR_CPUS];


#endif

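/*
 * flush_tlb_pgtables() just flushes the whole mm, which also drops
 * any cached intermediate (page-table) translations.
 */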
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	flush_tlb_mm(mm);
}

#endif /* _X86_64_PGALLOC_H */