#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>

#include <asm/pgtable.h>
#include <asm/cache.h>

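/* Flush 'size' bytes of the kernel data cache starting at 'start'.  The
 * assembly helper takes a (start, end) pair, so this wrapper just computes
 * the end address. */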
#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size));

static inline void
flush_page_to_ram(struct page *page)
{
}

extern void flush_cache_all_local(void);

#ifdef CONFIG_SMP
static inline void flush_cache_all(void)
{
	smp_call_function((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
	flush_cache_all_local();
}
#else
#define flush_cache_all flush_cache_all_local
#endif

#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif

/* The following value needs to be tuned and probably scaled with the
 * cache size.
 */

#define FLUSH_THRESHOLD 0x80000

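/* Heuristic: below FLUSH_THRESHOLD it is cheaper to flush just the range;
 * beyond that, flushing the whole data cache wins.  On SMP we always flush
 * by range, since the whole-cache flush would (presumably) only reach the
 * local CPU's cache. */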
static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	flush_user_dcache_range_asm(start,end);
#else
	if ((end - start) < FLUSH_THRESHOLD)
		flush_user_dcache_range_asm(start,end);
	else
		flush_data_cache();
#endif
}

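/* Same threshold policy as flush_user_dcache_range(), applied to the
 * instruction cache. */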
static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	flush_user_icache_range_asm(start,end);
#else
	if ((end - start) < FLUSH_THRESHOLD)
		flush_user_icache_range_asm(start,end);
	else
		flush_instruction_cache();
#endif
}

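/* sr3 holds the space id of the user context that is currently loaded.  If
 * the mm being flushed is that context, its addresses can be flushed by
 * range; otherwise fall back to flushing everything, since the range
 * flushers only operate on the current space. */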
static inline void
flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int sr3;

	if (!mm->context) {
		BUG();
		return;
	}

	sr3 = mfsp(3);
	if (mm->context == sr3) {
		flush_user_dcache_range(start,end);
		flush_user_icache_range(start,end);
	} else {
		flush_cache_all();
	}
}

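/* Per-page variant of the above: the instruction cache only needs flushing
 * for executable (VM_EXEC) mappings. */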
static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int sr3;

	if (!vma->vm_mm->context) {
		BUG();
		return;
	}

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(vmaddr,vmaddr + PAGE_SIZE);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range(vmaddr,vmaddr + PAGE_SIZE);
	} else {
		if (vma->vm_flags & VM_EXEC)
			flush_cache_all();
		else
			flush_data_cache();
	}
}

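/* If the page has a mapping but no user mappings yet, defer the flush by
 * marking the page PG_dcache_dirty; the flush is then expected to happen
 * when the page is actually mapped into user space.  Otherwise flush it
 * immediately. */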
extern void __flush_dcache_page(struct page *page);
static inline void flush_dcache_page(struct page *page)
{
	if (page->mapping && !page->mapping->i_mmap &&
	    !page->mapping->i_mmap_shared) {
		set_bit(PG_dcache_dirty, &page->flags);
	} else {
		__flush_dcache_page(page);
	}
}

#define flush_icache_page(vma,page) \
	do { \
		flush_kernel_dcache_page(page_address(page)); \
		flush_kernel_icache_page(page_address(page)); \
	} while (0)

#define flush_icache_user_range(vma, page, addr, len) \
	flush_user_icache_range(addr, addr + len);

#define flush_icache_range(s,e) \
	do { \
		flush_kernel_dcache_range_asm(s,e); \
		flush_kernel_icache_range_asm(s,e); \
	} while (0)

/* TLB flushing routines.... */

extern void flush_tlb_all(void);

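/* Load a user context: the space id goes into sr3 and the corresponding
 * protection id into cr8.  The shift (presumably) accounts for the
 * low-order flag bit of cr8 sitting below the protection id field. */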
static inline void load_context(mm_context_t context)
{
	mtsp(context, 3);
#if SPACEID_SHIFT == 0
	mtctl(context << 1, 8);
#else
	mtctl(context >> (SPACEID_SHIFT - 1), 8);
#endif
}

/*
 * flush_tlb_mm()
 *
 * XXX This code is NOT valid for HP-UX compatibility processes
 * (although it will probably work 99% of the time).  HP-UX
 * processes are free to play with space ids and save them
 * over long periods of time, etc., so we have to preserve the
 * space and just flush the entire TLB.  We need to check the
 * personality in order to do that, but the personality is not
 * currently being set correctly.
 *
 * Of course, Linux processes could do the same thing, but
 * we don't support that (and the compilers, dynamic linker,
 * etc. do not do that).
 */

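/* On UP it is cheaper to hand the mm a fresh space id than to purge its
 * TLB entries: nothing can match entries tagged with the old space id any
 * more, and recycling of space ids is left to alloc_sid()/free_sid().
 * On SMP we simply flush the whole TLB. */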
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == &init_mm) BUG(); /* Should never happen */

#ifdef CONFIG_SMP
	flush_tlb_all();
#else
	if (mm) {
		if (mm->context != 0)
			free_sid(mm->context);
		mm->context = alloc_sid();
		if (mm == current->active_mm)
			load_context(mm->context);
	}
#endif
}

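/* flush_tlb_pgtables() exists for architectures that map the page tables
 * themselves into virtual address space and must flush those mappings when
 * tables are torn down; PA-RISC does not, hence the empty body. */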
static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
		unsigned long addr)
{
	/* For one page, it's not worth testing the split_tlb variable */

	mtsp(vma->vm_mm->context,1);
	pdtlb(addr);
	pitlb(addr);
}

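/* Purge the range page by page, with the mm's space id loaded into sr1 for
 * the purge instructions.  A split I/D TLB needs both pdtlb and pitlb; a
 * combined TLB only needs pdtlb.  Past 512 pages a global flush is assumed
 * to be cheaper. */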
static inline void flush_tlb_range(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)  /* XXX arbitrary, should be tuned */
		flush_tlb_all();
	else {
		mtsp(mm->context,1);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
	}
}

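/* Page-table allocation.  No "fast" (quicklist) allocators are implemented,
 * so the slow paths below always allocate and zero a fresh page. */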
static inline pgd_t *pgd_alloc_one_fast(void)
{
	return NULL; /* not implemented */
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
	pgd_t *pgd = pgd_alloc_one_fast();
	if (!pgd) {
		pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
		if (pgd)
			clear_page(pgd);
	}
	return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

#ifdef __LP64__

/* Three Level Page Table Support for pmd's */

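/* With a 64-bit kernel a three-level layout is used: each pgd entry holds
 * the physical address of a pmd page, tagged with the _PAGE_TABLE bits. */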
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_val(*pgd) = _PAGE_TABLE + __pa((unsigned long)pmd);
}

static inline pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	return NULL; /* not implemented */
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
	if (pmd)
		clear_page(pmd);
	return pmd;
}

static inline void pmd_free(pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

#else

/* Two Level Page Table Support for pmd's */

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */

#define pmd_alloc_one_fast(mm, addr)	({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

#endif

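/* Common to both layouts: a pmd entry holds the physical address of a pte
 * page, tagged with the _PAGE_TABLE bits. */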
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
{
	pmd_val(*pmd_entry) = _PAGE_TABLE + __pa((unsigned long)pte);
}

static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	return NULL; /* not implemented */
}

static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pte)
		clear_page(pte);
	return pte;
}

static inline void pte_free(pte_t *pte)
{
	free_page((unsigned long)pte);
}

extern int do_check_pgt_cache(int, int);

#endif