/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hpenner@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/config.h>
#include <asm/processor.h>
#include <linux/threads.h>
#include <linux/slab.h>

#define pgd_quicklist (S390_lowcore.cpu_data.pgd_quick)
#define pmd_quicklist (S390_lowcore.cpu_data.pmd_quick)
#define pte_quicklist (S390_lowcore.cpu_data.pte_quick)
#define pgtable_cache_size (S390_lowcore.cpu_data.pgtable_cache_sz)
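
/*
 * The quicklists are per-cpu caches of recently freed page
 * tables, anchored in this cpu's lowcore and linked through the
 * first word of each cached table. pgtable_cache_size counts the
 * pages held in these lists: pgds and pmds are two pages each,
 * pte tables a single page.
 */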

/*
 * diag10 issues diagnose 0x10 ("release pages"), telling a VM
 * hypervisor that the contents of the page at addr may be
 * discarded.
 */
extern void diag10(unsigned long addr);

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

/*
 * page directory allocation/free routines.
 */
extern __inline__ pgd_t *get_pgd_slow (void)
{
        pgd_t *ret;
        int i;

        ret = (pgd_t *) __get_free_pages(GFP_KERNEL, 1);
        if (ret != NULL)
                for (i = 0; i < PTRS_PER_PGD; i++)
                        pgd_clear(ret + i);
        return ret;
}
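
/*
 * Grab a pgd from the per-cpu quicklist. The first word of each
 * cached table is used as the link to the next one, so
 * "ret[0] = ret[1]" restores that word from the adjacent (still
 * invalid) word of the table before handing it out.
 */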
extern __inline__ pgd_t *get_pgd_fast (void)
{
        unsigned long *ret = pgd_quicklist;

        if (ret != NULL) {
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = ret[1];
                pgtable_cache_size -= 2;
        }
        return (pgd_t *) ret;
}

extern __inline__ pgd_t *pgd_alloc (struct mm_struct *mm)
{
        pgd_t *pgd;

        pgd = get_pgd_fast();
        if (!pgd)
                pgd = get_pgd_slow();
        return pgd;
}
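
/*
 * Return a pgd to the quicklist: the current list head is stored
 * in the first word of the table and the table becomes the new
 * head. A pgd is an order-1 allocation, so the cache counter
 * grows by two pages.
 */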
extern __inline__ void free_pgd_fast (pgd_t *pgd)
{
        *(unsigned long *) pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size += 2;
}

extern __inline__ void free_pgd_slow (pgd_t *pgd)
{
        free_pages((unsigned long) pgd, 1);
}

#define pgd_free(pgd) free_pgd_fast(pgd)

extern pmd_t *pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd);

/*
 * page middle directory allocation/free routines.
 */
extern inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pmd_t *pmd;
        int i;

        pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, 1);
        if (pmd != NULL) {
                for (i = 0; i < PTRS_PER_PMD; i++)
                        pmd_clear(pmd + i);
        }
        return pmd;
}

extern __inline__ pmd_t *
pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long *ret = (unsigned long *) pmd_quicklist;

        if (ret != NULL) {
                pmd_quicklist = (unsigned long *)(*ret);
                ret[0] = ret[1];
                pgtable_cache_size -= 2;
        }
        return (pmd_t *) ret;
}

extern void pmd_free_order2(pmd_t *);
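
/*
 * pmds are normally order-1 allocations and are recycled via the
 * quicklist. A pmd whose page has PG_arch_1 set was presumably
 * allocated at a higher order (hence the name pmd_free_order2)
 * and is handed to pmd_free_order2() instead.
 */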
extern __inline__ void pmd_free_fast (pmd_t *pmd)
{
        if (test_bit(PG_arch_1, &virt_to_page(pmd)->flags) == 0) {
                *(unsigned long *) pmd = (unsigned long) pmd_quicklist;
                pmd_quicklist = (unsigned long *) pmd;
                pgtable_cache_size += 2;
        } else
                pmd_free_order2(pmd);
}

extern __inline__ void pmd_free_slow (pmd_t *pmd)
{
        free_pages((unsigned long) pmd, 1);
}

#define pmd_free(pmd) pmd_free_fast(pmd)
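
/*
 * A Linux pmd entry consists of two hardware segment-table
 * entries (pmd_val and pmd_val1), pointing at the first and
 * second 256-entry halves of the pte table respectively.
 */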
extern inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
        pmd_val(*pmd) = _PMD_ENTRY | __pa(pte);
        pmd_val1(*pmd) = _PMD_ENTRY | __pa(pte + 256);
}

/*
 * page table entry allocation/free routines.
 */
extern inline pte_t * pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pte_t *pte;
        int i;

        pte = (pte_t *) __get_free_page(GFP_KERNEL);
        if (pte != NULL) {
                for (i = 0; i < PTRS_PER_PTE; i++)
                        pte_clear(pte + i);
        }
        return pte;
}
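
/*
 * The pte quicklist works like the pgd and pmd ones above; a pte
 * table is a single page, so the cache counter moves by one.
 */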
extern __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long *ret = (unsigned long *) pte_quicklist;

        if (ret != NULL) {
                pte_quicklist = (unsigned long *)(*ret);
                ret[0] = ret[1];
                pgtable_cache_size--;
        }
        return (pte_t *) ret;
}

extern __inline__ void pte_free_fast (pte_t *pte)
{
        *(unsigned long *) pte = (unsigned long) pte_quicklist;
        pte_quicklist = (unsigned long *) pte;
        pgtable_cache_size++;
}

extern __inline__ void pte_free_slow (pte_t *pte)
{
        free_page((unsigned long) pte);
}

#define pte_free(pte) pte_free_fast(pte)

extern int do_check_pgt_cache (int, int);

/*
 * This establishes kernel virtual mappings (e.g., as a result of a
 * vmalloc call). Since s390-esame uses a separate kernel page table,
 * there is nothing to do here... :)
 */
#define set_pgdir(vmaddr, entry) do { } while(0)

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *    (called only from vmalloc/vfree)
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */

/*
 * S/390 has three ways of flushing TLBs:
 *  'ptlb' flushes the TLB of the local processor
 *  'csp'  flushes the TLBs on all PUs of an SMP system
 *  'ipte' invalidates a pte in a page table and flushes it out of
 *         the TLBs of all PUs of an SMP system
 */

#define local_flush_tlb() \
do {  __asm__ __volatile__("ptlb": : :"memory"); } while (0)

#ifndef CONFIG_SMP

/*
 * We always need to flush, since s390 does not flush the
 * TLB on each context switch.
 */

static inline void flush_tlb(void)
{
        local_flush_tlb();
}
static inline void flush_tlb_all(void)
{
        local_flush_tlb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        local_flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        local_flush_tlb();
}
static inline void flush_tlb_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
        local_flush_tlb();
}

#else

#include <asm/smp.h>
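
/*
 * csp (compare and swap and purge) conditionally swaps a word in
 * storage and, on success, purges the TLBs of all CPUs in the
 * configuration. Register pair 2/3 holds the compare and swap
 * values; both are zeroed and the dummy word starts out as zero,
 * so the swap always succeeds. The la/nill sequence word-aligns
 * the address of the dummy, and the final "la 4,1(4)" sets the
 * low-order bit in the operand register, selecting a TLB purge.
 */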
static inline void global_flush_tlb(void)
{
        long dummy = 0;

        __asm__ __volatile__ (
                "    la   4,3(%0)\n"
                "    nill 4,0xfffc\n"
                "    la   4,1(4)\n"
                "    slr  2,2\n"
                "    slr  3,3\n"
                "    csp  2,4"
                : : "a" (&dummy) : "cc", "2", "3", "4" );
}

/*
 * We only have to do a global flush of the TLB if the process
 * has run on a PU other than the current one since the last
 * flush. If the mm is shared with other threads (mm_users > 1)
 * we always do a global flush, since the process may be running
 * on more than one processor at the same time.
 */
static inline void __flush_tlb_mm(struct mm_struct * mm)
{
        if (mm->cpu_vm_mask != (1UL << smp_processor_id())) {
                /* mm was active on more than one cpu. */
                if (mm == current->active_mm &&
                    atomic_read(&mm->mm_users) == 1)
                        /* this cpu is the only one using the mm. */
                        mm->cpu_vm_mask = 1UL << smp_processor_id();
                global_flush_tlb();
        } else
                local_flush_tlb();
}

static inline void flush_tlb(void)
{
        __flush_tlb_mm(current->mm);
}
static inline void flush_tlb_all(void)
{
        global_flush_tlb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        __flush_tlb_mm(mm);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        __flush_tlb_mm(vma->vm_mm);
}
static inline void flush_tlb_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
        __flush_tlb_mm(mm);
}

#endif

extern inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
        /* S/390 does not keep any page table caches in TLB */
}

static inline int ptep_test_and_clear_and_flush_young(struct vm_area_struct *vma,
                                                      unsigned long address, pte_t *ptep)
{
        /* No need to flush TLB; bits are in storage key */
        return ptep_test_and_clear_young(ptep);
}

static inline int ptep_test_and_clear_and_flush_dirty(struct vm_area_struct *vma,
                                                      unsigned long address, pte_t *ptep)
{
        /* No need to flush TLB; bits are in storage key */
        return ptep_test_and_clear_dirty(ptep);
}
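
/*
 * ipte invalidates the pte in the page table and flushes it from
 * the TLBs of all CPUs, but may only be used on a pte that is
 * still valid; an entry already marked _PAGE_INVALID is simply
 * cleared.
 */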
static inline pte_t ptep_invalidate(struct vm_area_struct *vma,
                                    unsigned long address, pte_t *ptep)
{
        pte_t pte = *ptep;
        if (!(pte_val(pte) & _PAGE_INVALID))
                __asm__ __volatile__ ("ipte %0,%1" : : "a" (ptep), "a" (address));
        pte_clear(ptep);
        return pte;
}

static inline void ptep_establish(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep, pte_t entry)
{
        ptep_invalidate(vma, address, ptep);
        set_pte(ptep, entry);
}

#endif /* _S390_PGALLOC_H */