/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
extern void fault_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
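
/*
 * zero_page_mask selects one of several zero pages based on the user
 * address, presumably to spread zero mappings across cache colors. A
 * hedged usage sketch (do_anonymous_page-style, illustrative only):
 *
 *	struct page *zp = ZERO_PAGE(address);
 *	pte_t entry = mk_pte(zp, vma->vm_page_prot);
 */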

#define is_zero_pfn	is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
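	/*
	 * If pfn is below zero_pfn the unsigned subtraction wraps to a
	 * huge value, so the single compare below covers both ends of
	 * the zero page range.
	 */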
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * Entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390 segment-table entries are combined to one PGD,
 * which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048
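
/*
 * Worked example (64 bit, illustrative): with the shifts above and 4K
 * pages, a virtual address addr decomposes into
 *	(addr >> 42) & 2047	-> pgd index (one entry maps 4 TB)
 *	(addr >> 31) & 2047	-> pud index (one entry maps 2 GB)
 *	(addr >> 20) & 2047	-> pmd index (one entry maps 1 MB)
 *	(addr >> 12) & 255	-> pte index (one entry maps 4 KB)
 */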

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as a system call address.
 */

extern unsigned long VMALLOC_START;

#ifndef __s390x__
#define VMALLOC_SIZE	(96UL << 20)
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_SIZE	(128UL << 30)
#define VMALLOC_END	0x3e000000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */
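
/*
 * For illustration: on 64 bit the reserved vmalloc range ends at
 * VMALLOC_END = 0x3e000000000UL and, at the 128GB size above, can
 * reach down to 0x3e000000000UL - (128UL << 30) = 0x3c000000000UL.
 */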

/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
 * mapping. This needs to be calculated at compile time since the size of the
 * VMEM_MAP is static but the size of struct page can change.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap		((struct page *) VMALLOC_END)
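
/*
 * Worked example (illustrative, assuming a 64 byte struct page and 4K
 * pages): on 64 bit VMEM_MAP_END - VMALLOC_END = 0x2000000000 bytes,
 * so VMEM_MAX_PAGES = 0x2000000000 / 64 = 0x80000000 struct pages,
 * enough to describe 8 TB of 1:1 mapped memory before the
 * VMALLOC_START clamp and the 16 MB rounding in VMEM_MAX_PHYS apply.
 */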

/*
 * A 31 bit pagetable entry of S390 has following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has following format:
 * |                     PFRA                         |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has following format:
 * |      region table origin                          |  DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

237
238 /* Hardware bits in the page table entry */
239 #define _PAGE_CO 0x100 /* HW Change-bit override */
240 #define _PAGE_RO 0x200 /* HW read-only bit */
241 #define _PAGE_INVALID 0x400 /* HW invalid bit */
242
243 /* Software bits in the page table entry */
244 #define _PAGE_SWT 0x001 /* SW pte type bit t */
245 #define _PAGE_SWX 0x002 /* SW pte type bit x */
246 #define _PAGE_SPECIAL 0x004 /* SW associated with special page */
247 #define __HAVE_ARCH_PTE_SPECIAL
248
249 /* Set of bits not changed in pte_modify */
250 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL)
251
252 /* Six different types of pages. */
253 #define _PAGE_TYPE_EMPTY 0x400
254 #define _PAGE_TYPE_NONE 0x401
255 #define _PAGE_TYPE_SWAP 0x403
256 #define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */
257 #define _PAGE_TYPE_RO 0x200
258 #define _PAGE_TYPE_RW 0x000
259 #define _PAGE_TYPE_EX_RO 0x202
260 #define _PAGE_TYPE_EX_RW 0x002
261
262 /*
263 * Only four types for huge pages, using the invalid bit and protection bit
264 * of a segment table entry.
265 */
266 #define _HPAGE_TYPE_EMPTY 0x020 /* _SEGMENT_ENTRY_INV */
267 #define _HPAGE_TYPE_NONE 0x220
268 #define _HPAGE_TYPE_RO 0x200 /* _SEGMENT_ENTRY_RO */
269 #define _HPAGE_TYPE_RW 0x000
270
/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 * _PAGE_TYPE_EX_RO	0110   ->   1110
 * _PAGE_TYPE_EX_RW	0010   ->   1010
 *
 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for bits combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */

/* Page status table bits for virtualization */
#define RCP_PCL_BIT	55
#define RCP_HR_BIT	54
#define RCP_HC_BIT	53
#define RCP_GR_BIT	50
#define RCP_GC_BIT	49

/* User dirty bit for KVM's migration feature */
#define KVM_UD_BIT	47

#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */

#endif /* __s390x__ */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/* Bits in the storage key */
#define _PAGE_CHANGED		0x02	/* HW changed bit		    */
#define _PAGE_REFERENCED	0x04	/* HW referenced bit		    */

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * Dependent on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory with a
 * primary/secondary page table execute only can be implemented but
 * it would cost an additional bit in the pte to distinguish all the
 * different pte types. To avoid that execute permission currently
 * implies read permission as well.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW

#ifndef __s390x__
# define PxD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT	2
#endif /* __s390x__ */
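
/*
 * With the noexec option a page table exists twice: the primary table
 * and a shadow table that carries the execute protection. The address
 * of the shadow table is kept in page->index of the page holding the
 * primary table; get_shadow_table() below recovers it from there (a
 * description of the scheme, see the s390 mm code for the allocation
 * side).
 */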

static inline void *get_shadow_table(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	addr = (unsigned long) table;
	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	*ptep = entry;
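	/*
	 * noexec: mirror the pte into the shadow page table in the
	 * second half of the page. Executable entries (_PAGE_SWX set)
	 * are copied read-only, everything else becomes empty.
	 */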
	if (mm->context.noexec) {
		if (!(pte_val(entry) & _PAGE_INVALID) &&
		    (pte_val(entry) & _PAGE_SWX))
			pte_val(entry) |= _PAGE_RO;
		else
			pte_val(entry) = _PAGE_TYPE_EMPTY;
		ptep[PTRS_PER_PTE] = entry;
	}
}

/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b)	(pte_val(a) == pte_val(b))

static inline void rcp_lock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	preempt_disable();
	while (test_and_set_bit(RCP_PCL_BIT, pgste))
		;
#endif
}

static inline void rcp_unlock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	clear_bit(RCP_PCL_BIT, pgste);
	preempt_enable();
#endif
}

/* forward declaration for SetPageUptodate in page-flags.h */
static inline void page_clear_dirty(struct page *page, int mapped);
#include <linux/page-flags.h>

static inline void ptep_rcp_copy(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	struct page *page = virt_to_page(pte_val(*ptep));
	unsigned int skey;
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (skey & _PAGE_REFERENCED)
		set_bit_simple(RCP_GR_BIT, pgste);
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
		SetPageReferenced(page);
#endif
}

/*
 * Query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_dirty and
	 * page_clear_dirty for details.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * details.
	 */
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

#define pgd_clear(pgd)		do { } while (0)
#define pud_clear(pud)		do { } while (0)

#else /* __s390x__ */

static inline void pgd_clear_kernel(pgd_t * pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pgd_clear(pgd_t * pgd)
{
	pgd_t *shadow = get_shadow_table(pgd);

	pgd_clear_kernel(pgd);
	if (shadow)
		pgd_clear_kernel(shadow);
}

static inline void pud_clear_kernel(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	pud_t *shadow = get_shadow_table(pud);

	pud_clear_kernel(pud);
	if (shadow)
		pud_clear_kernel(shadow);
}

#endif /* __s390x__ */

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmd)
{
	pmd_t *shadow = get_shadow_table(pmd);

	pmd_clear_kernel(pmd);
	if (shadow)
		pmd_clear_kernel(shadow);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec)
		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings. So we just do nothing. */
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	/*
	 * PROT_NONE needs to be remapped from the pte type to the ste type.
	 * The HW invalid bit is also different for pte and ste. The pte
	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
	 * bit, so we don't have to clear it.
	 */
	if (pte_val(pte) & _PAGE_INVALID) {
		if (pte_val(pte) & _PAGE_SWT)
			pte_val(pte) |= _HPAGE_TYPE_NONE;
		pte_val(pte) |= _SEGMENT_ENTRY_INV;
	}
	/*
	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
	 * table entry.
	 */
	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
	/*
	 * Also set the change-override bit because we don't need dirty bit
	 * tracking for hugetlbfs pages.
	 */
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
#endif

#ifdef CONFIG_PGSTE
/*
 * Get (and clear) the user dirty bit for a PTE.
 */
static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
						     pte_t *ptep)
{
	int dirty;
	unsigned long *pgste;
	struct page *page;
	unsigned int skey;

	if (!mm->context.has_pgste)
		return -EINVAL;
	rcp_lock(ptep);
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	page = virt_to_page(pte_val(*ptep));
	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
	if (skey & _PAGE_CHANGED)
		page_clear_dirty(page, 1);
	rcp_unlock(ptep);
	return dirty;
}
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long physpage;
	int young;
	unsigned long *pgste;

	if (!vma->vm_mm->context.has_pgste)
		return 0;
	physpage = pte_val(*ptep) & PAGE_MASK;
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
	rcp_lock(ptep);
	if (young)
		set_bit_simple(RCP_GR_BIT, pgste);
	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
	rcp_unlock(ptep);
	return young;
#endif
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush the TLB;
	 * on s390 reference bits are in the storage key and never in the TLB.
	 * With virtualization we handle the reference bit, without it
	 * we can simply return. */
#ifdef CONFIG_PGSTE
	return ptep_test_and_clear_young(vma, address, ptep);
#endif
	return 0;
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

static inline void ptep_invalidate(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	if (mm->context.has_pgste) {
		rcp_lock(ptep);
		__ptep_ipte(address, ptep);
		ptep_rcp_copy(ptep);
		pte_val(*ptep) = _PAGE_TYPE_EMPTY;
		rcp_unlock(ptep);
		return;
	}
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec) {
		__ptep_ipte(address, ptep + PTRS_PER_PTE);
		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
	}
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	(__mm)->context.flush_mm = 1;					\
	if (atomic_read(&(__mm)->context.attach_count) > 1 ||		\
	    (__mm) != current->active_mm)				\
		ptep_invalidate(__mm, __address, __ptep);		\
	else								\
		pte_clear((__mm), (__address), (__ptep));		\
	__pte;								\
})
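
/*
 * A sketch of the common-code sequence described above (illustrative
 * only, modeled on change_pte_range):
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);  // flushes the TLB here
 *	set_pte_at(mm, addr, ptep, pte_modify(pte, newprot));
 *	// the later flush_tlb_range() is a nop on s390
 */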

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	ptep_invalidate(vma->vm_mm, address, ptep);
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t pte = *ptep;

	if (full)
		pte_clear(mm, addr, ptep);
	else
		ptep_invalidate(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (pte_write(__pte)) {						\
		(__mm)->context.flush_mm = 1;				\
		if (atomic_read(&(__mm)->context.attach_count) > 1 ||	\
		    (__mm) != current->active_mm)			\
			ptep_invalidate(__mm, __addr, __ptep);		\
		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte));	\
	}								\
})

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
	}								\
	__changed;							\
})

/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if the page is not mapped in any
 * address space.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page, int mapped)
{
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, mapped);
}

/*
 * Test and clear referenced bit in storage key.
 */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
	unsigned long physpage = page_to_phys(page);
	int ccode;

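	/*
	 * rrbe resets the reference bit in the storage key and returns
	 * the old reference/change state in the condition code; ipm/srl
	 * extract the cc, so "ccode & 2" is non-zero iff the page was
	 * referenced.
	 */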
	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode) : "a" (physpage) : "cc" );
	return ccode & 2;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bit 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                  offset                            |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}
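
/*
 * Worked example (illustrative): mk_swap_pte(3, 5) yields
 * 0x403 | (3 << 2) | ((5 & 1) << 7) | ((5 & ~1UL) << 11) = 0x248f;
 * __swp_type() and __swp_offset() below recover 3 and 5 from it.
 */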

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })
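
/*
 * Worked example (illustrative): pgoff_to_pte(0x1234) stores the low
 * 7 offset bits at bit 1 and the remaining bits at bit 12:
 * ((0x34 << 1) + (0x24 << 12)) | _PAGE_TYPE_FILE = 0x24669,
 * which pte_to_pgoff() maps back to 0x1234.
 */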

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */